@lobehub/chat 0.146.2 → 0.147.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +3 -5
- package/.github/workflows/issue-close-require.yml +1 -1
- package/.github/workflows/release.yml +1 -1
- package/.github/workflows/test.yml +3 -1
- package/.i18nrc.js +13 -8
- package/.seorc.cjs +9 -0
- package/CHANGELOG.md +73 -0
- package/README.md +25 -25
- package/README.zh-CN.md +25 -25
- package/contributing/Home.md +1 -1
- package/docs/self-hosting/advanced/analytics.mdx +12 -0
- package/docs/self-hosting/advanced/analytics.zh-CN.mdx +10 -0
- package/docs/self-hosting/advanced/authentication.mdx +19 -0
- package/docs/self-hosting/advanced/authentication.zh-CN.mdx +15 -0
- package/docs/self-hosting/advanced/sso-providers/auth0.mdx +19 -2
- package/docs/self-hosting/advanced/sso-providers/auth0.zh-CN.mdx +15 -2
- package/docs/self-hosting/advanced/sso-providers/authentik.mdx +18 -3
- package/docs/self-hosting/advanced/sso-providers/authentik.zh-CN.mdx +14 -2
- package/docs/self-hosting/advanced/sso-providers/github.mdx +13 -0
- package/docs/self-hosting/advanced/sso-providers/github.zh-CN.mdx +13 -3
- package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.mdx +18 -2
- package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.zh-CN.mdx +15 -2
- package/docs/self-hosting/advanced/sso-providers/zitadel.mdx +17 -2
- package/docs/self-hosting/advanced/sso-providers/zitadel.zh-CN.mdx +14 -2
- package/docs/self-hosting/advanced/upstream-sync.mdx +13 -0
- package/docs/self-hosting/advanced/upstream-sync.zh-CN.mdx +10 -0
- package/docs/self-hosting/environment-variables/analytics.mdx +15 -0
- package/docs/self-hosting/environment-variables/analytics.zh-CN.mdx +13 -0
- package/docs/self-hosting/environment-variables/auth.mdx +14 -0
- package/docs/self-hosting/environment-variables/auth.zh-CN.mdx +15 -1
- package/docs/self-hosting/environment-variables/basic.mdx +15 -0
- package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +11 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +26 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +10 -0
- package/docs/self-hosting/environment-variables.mdx +13 -2
- package/docs/self-hosting/environment-variables.zh-CN.mdx +9 -0
- package/docs/self-hosting/examples/azure-openai.mdx +12 -0
- package/docs/self-hosting/examples/azure-openai.zh-CN.mdx +12 -0
- package/docs/self-hosting/examples/ollama.mdx +13 -0
- package/docs/self-hosting/examples/ollama.zh-CN.mdx +11 -0
- package/docs/self-hosting/faq/no-v1-suffix.mdx +12 -0
- package/docs/self-hosting/faq/no-v1-suffix.zh-CN.mdx +9 -0
- package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.mdx +14 -0
- package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.zh-CN.mdx +11 -0
- package/docs/self-hosting/platform/docker-compose.mdx +21 -5
- package/docs/self-hosting/platform/docker-compose.zh-CN.mdx +18 -5
- package/docs/self-hosting/platform/docker.mdx +26 -8
- package/docs/self-hosting/platform/docker.zh-CN.mdx +27 -9
- package/docs/self-hosting/platform/netlify.mdx +18 -2
- package/docs/self-hosting/platform/netlify.zh-CN.mdx +14 -1
- package/docs/self-hosting/platform/railway.mdx +12 -0
- package/docs/self-hosting/platform/railway.zh-CN.mdx +11 -0
- package/docs/self-hosting/platform/repocloud.mdx +12 -0
- package/docs/self-hosting/platform/repocloud.zh-CN.mdx +10 -0
- package/docs/self-hosting/platform/sealos.mdx +11 -0
- package/docs/self-hosting/platform/sealos.zh-CN.mdx +10 -0
- package/docs/self-hosting/platform/vercel.mdx +12 -0
- package/docs/self-hosting/platform/vercel.zh-CN.mdx +11 -0
- package/docs/self-hosting/platform/zeabur.mdx +11 -0
- package/docs/self-hosting/platform/zeabur.zh-CN.mdx +10 -0
- package/docs/self-hosting/start.mdx +12 -0
- package/docs/self-hosting/start.zh-CN.mdx +13 -0
- package/docs/usage/agents/concepts.mdx +13 -0
- package/docs/usage/agents/concepts.zh-CN.mdx +10 -0
- package/docs/usage/agents/custom-agent.mdx +16 -2
- package/docs/usage/agents/custom-agent.zh-CN.mdx +14 -2
- package/docs/usage/agents/model.mdx +11 -2
- package/docs/usage/agents/model.zh-CN.mdx +12 -0
- package/docs/usage/agents/prompt.mdx +15 -0
- package/docs/usage/agents/prompt.zh-CN.mdx +10 -0
- package/docs/usage/agents/topics.mdx +13 -0
- package/docs/usage/agents/topics.zh-CN.mdx +11 -0
- package/docs/usage/features/agent-market.mdx +11 -1
- package/docs/usage/features/agent-market.zh-CN.mdx +13 -0
- package/docs/usage/features/local-llm.mdx +12 -2
- package/docs/usage/features/local-llm.zh-CN.mdx +3 -3
- package/docs/usage/features/mobile.mdx +10 -1
- package/docs/usage/features/mobile.zh-CN.mdx +11 -0
- package/docs/usage/features/more.mdx +13 -0
- package/docs/usage/features/more.zh-CN.mdx +10 -0
- package/docs/usage/features/multi-ai-providers.mdx +14 -1
- package/docs/usage/features/multi-ai-providers.zh-CN.mdx +16 -0
- package/docs/usage/features/plugin-system.mdx +12 -1
- package/docs/usage/features/plugin-system.zh-CN.mdx +9 -0
- package/docs/usage/features/pwa.mdx +11 -1
- package/docs/usage/features/pwa.zh-CN.mdx +13 -0
- package/docs/usage/features/text-to-image.mdx +11 -1
- package/docs/usage/features/text-to-image.zh-CN.mdx +13 -0
- package/docs/usage/features/theme.mdx +12 -1
- package/docs/usage/features/theme.zh-CN.mdx +11 -0
- package/docs/usage/features/tts.mdx +14 -1
- package/docs/usage/features/tts.zh-CN.mdx +12 -0
- package/docs/usage/features/vision.mdx +11 -1
- package/docs/usage/features/vision.zh-CN.mdx +10 -0
- package/docs/usage/plugins/basic-usage.mdx +12 -0
- package/docs/usage/plugins/basic-usage.zh-CN.mdx +10 -0
- package/docs/usage/plugins/custom-plugin.mdx +12 -0
- package/docs/usage/plugins/custom-plugin.zh-CN.mdx +10 -0
- package/docs/usage/plugins/development.mdx +18 -0
- package/docs/usage/plugins/development.zh-CN.mdx +12 -0
- package/docs/usage/plugins/store.mdx +12 -0
- package/docs/usage/plugins/store.zh-CN.mdx +9 -0
- package/docs/usage/providers/groq.mdx +14 -2
- package/docs/usage/providers/groq.zh-CN.mdx +12 -2
- package/docs/usage/providers/ollama/gemma.mdx +13 -3
- package/docs/usage/providers/ollama/gemma.zh-CN.mdx +12 -3
- package/docs/usage/providers/ollama/qwen.mdx +12 -4
- package/docs/usage/providers/ollama/qwen.zh-CN.mdx +10 -3
- package/docs/usage/providers/ollama.mdx +19 -9
- package/docs/usage/providers/ollama.zh-CN.mdx +20 -9
- package/docs/usage/start.mdx +13 -2
- package/docs/usage/start.zh-CN.mdx +11 -2
- package/locales/ar/common.json +0 -26
- package/locales/ar/components.json +15 -0
- package/locales/ar/error.json +1 -52
- package/locales/ar/modelProvider.json +226 -0
- package/locales/ar/setting.json +48 -199
- package/locales/bg-BG/common.json +0 -26
- package/locales/bg-BG/components.json +15 -0
- package/locales/bg-BG/error.json +1 -52
- package/locales/bg-BG/modelProvider.json +226 -0
- package/locales/bg-BG/setting.json +48 -199
- package/locales/de-DE/common.json +0 -26
- package/locales/de-DE/components.json +15 -0
- package/locales/de-DE/error.json +1 -52
- package/locales/de-DE/modelProvider.json +226 -0
- package/locales/de-DE/setting.json +48 -199
- package/locales/en-US/common.json +0 -26
- package/locales/en-US/components.json +15 -0
- package/locales/en-US/error.json +1 -52
- package/locales/en-US/modelProvider.json +226 -0
- package/locales/en-US/setting.json +48 -199
- package/locales/es-ES/common.json +0 -26
- package/locales/es-ES/components.json +15 -0
- package/locales/es-ES/error.json +1 -52
- package/locales/es-ES/modelProvider.json +226 -0
- package/locales/es-ES/setting.json +48 -199
- package/locales/fr-FR/common.json +0 -26
- package/locales/fr-FR/components.json +15 -0
- package/locales/fr-FR/error.json +1 -52
- package/locales/fr-FR/modelProvider.json +226 -0
- package/locales/fr-FR/setting.json +48 -199
- package/locales/it-IT/common.json +0 -26
- package/locales/it-IT/components.json +15 -0
- package/locales/it-IT/error.json +1 -52
- package/locales/it-IT/modelProvider.json +226 -0
- package/locales/it-IT/setting.json +59 -210
- package/locales/ja-JP/common.json +0 -26
- package/locales/ja-JP/components.json +15 -0
- package/locales/ja-JP/error.json +1 -52
- package/locales/ja-JP/modelProvider.json +226 -0
- package/locales/ja-JP/setting.json +59 -210
- package/locales/ko-KR/common.json +0 -26
- package/locales/ko-KR/components.json +15 -0
- package/locales/ko-KR/error.json +1 -52
- package/locales/ko-KR/modelProvider.json +226 -0
- package/locales/ko-KR/setting.json +48 -199
- package/locales/nl-NL/common.json +0 -26
- package/locales/nl-NL/components.json +15 -0
- package/locales/nl-NL/error.json +4 -55
- package/locales/nl-NL/modelProvider.json +226 -0
- package/locales/nl-NL/setting.json +49 -200
- package/locales/pl-PL/common.json +0 -26
- package/locales/pl-PL/components.json +15 -0
- package/locales/pl-PL/error.json +1 -52
- package/locales/pl-PL/modelProvider.json +226 -0
- package/locales/pl-PL/setting.json +48 -199
- package/locales/pt-BR/common.json +0 -26
- package/locales/pt-BR/components.json +15 -0
- package/locales/pt-BR/error.json +1 -52
- package/locales/pt-BR/modelProvider.json +226 -0
- package/locales/pt-BR/setting.json +48 -199
- package/locales/ru-RU/common.json +0 -26
- package/locales/ru-RU/components.json +15 -0
- package/locales/ru-RU/error.json +1 -52
- package/locales/ru-RU/modelProvider.json +226 -0
- package/locales/ru-RU/setting.json +48 -199
- package/locales/tr-TR/common.json +0 -26
- package/locales/tr-TR/components.json +15 -0
- package/locales/tr-TR/error.json +1 -52
- package/locales/tr-TR/modelProvider.json +226 -0
- package/locales/tr-TR/setting.json +48 -199
- package/locales/vi-VN/common.json +0 -26
- package/locales/vi-VN/components.json +15 -0
- package/locales/vi-VN/error.json +1 -52
- package/locales/vi-VN/modelProvider.json +226 -0
- package/locales/vi-VN/setting.json +48 -199
- package/locales/zh-CN/common.json +0 -26
- package/locales/zh-CN/components.json +15 -0
- package/locales/zh-CN/error.json +2 -53
- package/locales/zh-CN/modelProvider.json +226 -0
- package/locales/zh-CN/setting.json +48 -199
- package/locales/zh-TW/common.json +0 -26
- package/locales/zh-TW/components.json +15 -0
- package/locales/zh-TW/error.json +1 -52
- package/locales/zh-TW/modelProvider.json +226 -0
- package/locales/zh-TW/setting.json +48 -199
- package/package.json +120 -116
- package/scripts/mdxWorkflow/index.ts +48 -0
- package/src/app/api/chat/[provider]/route.test.ts +4 -9
- package/src/app/api/chat/[provider]/route.ts +6 -23
- package/src/app/api/chat/{[provider]/agentRuntime.test.ts → agentRuntime.test.ts} +12 -12
- package/src/app/api/chat/{[provider]/agentRuntime.ts → agentRuntime.ts} +11 -30
- package/src/app/api/chat/auth/index.ts +42 -0
- package/src/app/api/chat/models/[provider]/route.ts +45 -0
- package/src/app/api/config/__snapshots__/route.test.ts.snap +127 -0
- package/src/app/api/config/route.test.ts +170 -0
- package/src/app/api/config/route.ts +46 -11
- package/src/app/api/plugin/gateway/route.ts +1 -1
- package/src/app/settings/llm/Azure/index.tsx +36 -43
- package/src/app/settings/llm/Bedrock/index.tsx +12 -20
- package/src/app/settings/llm/Ollama/index.tsx +2 -2
- package/src/app/settings/llm/OpenAI/index.tsx +9 -121
- package/src/app/settings/llm/OpenRouter/index.tsx +1 -1
- package/src/app/settings/llm/TogetherAI/index.tsx +0 -1
- package/src/app/settings/llm/components/ProviderConfig/index.tsx +48 -32
- package/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx +94 -0
- package/src/app/settings/llm/components/ProviderModelList/MaxTokenSlider.tsx +88 -0
- package/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx +128 -0
- package/src/app/settings/llm/components/ProviderModelList/ModelFetcher.tsx +81 -0
- package/src/app/settings/llm/components/ProviderModelList/Option.tsx +38 -0
- package/src/app/settings/llm/components/ProviderModelList/index.tsx +142 -0
- package/src/app/settings/llm/const.ts +1 -1
- package/src/app/settings/llm/index.tsx +7 -6
- package/src/app/settings/tts/TTS/index.tsx +1 -1
- package/src/components/AntdStaticMethods/index.test.tsx +43 -0
- package/src/components/ModelIcon/index.tsx +25 -7
- package/src/components/ModelSelect/index.tsx +67 -56
- package/src/config/modelProviders/anthropic.ts +6 -1
- package/src/config/modelProviders/azure.ts +42 -0
- package/src/config/modelProviders/bedrock.ts +4 -1
- package/src/config/modelProviders/google.ts +2 -14
- package/src/config/modelProviders/groq.ts +3 -0
- package/src/config/modelProviders/index.ts +19 -14
- package/src/config/modelProviders/mistral.ts +5 -0
- package/src/config/modelProviders/moonshot.ts +3 -1
- package/src/config/modelProviders/ollama.ts +6 -17
- package/src/config/modelProviders/openai.ts +9 -13
- package/src/config/modelProviders/openrouter.ts +6 -1
- package/src/config/modelProviders/perplexity.ts +2 -0
- package/src/config/modelProviders/togetherai.ts +11 -0
- package/src/config/modelProviders/zeroone.ts +3 -0
- package/src/config/modelProviders/zhipu.ts +3 -0
- package/src/config/server/provider.ts +52 -7
- package/src/const/auth.ts +0 -1
- package/src/const/{settings.ts → settings/index.ts} +31 -4
- package/src/const/url.ts +1 -1
- package/src/database/core/db.ts +14 -0
- package/src/database/schemas/user.ts +1 -25
- package/src/features/AgentSetting/AgentConfig/ModelSelect.tsx +7 -11
- package/src/features/ChatInput/STT/common.tsx +80 -78
- package/src/features/ChatInput/STT/index.tsx +8 -6
- package/src/features/Conversation/Error/APIKeyForm/Bedrock.tsx +8 -8
- package/src/features/Conversation/Error/APIKeyForm/ProviderApiKeyForm.tsx +73 -0
- package/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx +74 -0
- package/src/features/Conversation/Error/APIKeyForm/index.tsx +25 -49
- package/src/features/ModelSwitchPanel/index.tsx +40 -13
- package/src/hooks/_header.ts +5 -20
- package/src/libs/agent-runtime/BaseAI.ts +17 -1
- package/src/libs/agent-runtime/anthropic/index.ts +3 -7
- package/src/libs/agent-runtime/azureOpenai/index.test.ts +166 -0
- package/src/libs/agent-runtime/azureOpenai/index.ts +16 -8
- package/src/libs/agent-runtime/bedrock/index.test.ts +199 -22
- package/src/libs/agent-runtime/bedrock/index.ts +11 -18
- package/src/libs/agent-runtime/groq/index.test.ts +350 -0
- package/src/libs/agent-runtime/groq/index.ts +14 -77
- package/src/libs/agent-runtime/mistral/index.test.ts +25 -19
- package/src/libs/agent-runtime/mistral/index.ts +24 -86
- package/src/libs/agent-runtime/moonshot/index.test.ts +2 -2
- package/src/libs/agent-runtime/moonshot/index.ts +14 -77
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +99 -0
- package/src/libs/agent-runtime/openai/fixtures/openai-models.json +170 -0
- package/src/libs/agent-runtime/openai/index.test.ts +15 -50
- package/src/libs/agent-runtime/openai/index.ts +15 -107
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +82 -0
- package/src/libs/agent-runtime/openrouter/fixtures/models.json +62 -0
- package/src/libs/agent-runtime/openrouter/index.test.ts +25 -9
- package/src/libs/agent-runtime/openrouter/index.ts +42 -84
- package/src/libs/agent-runtime/openrouter/type.ts +28 -0
- package/src/libs/agent-runtime/togetherai/index.test.ts +12 -9
- package/src/libs/agent-runtime/togetherai/index.ts +20 -85
- package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +14 -22
- package/src/libs/agent-runtime/utils/anthropicHelpers.ts +4 -10
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +147 -0
- package/src/libs/agent-runtime/zeroone/index.test.ts +350 -0
- package/src/libs/agent-runtime/zeroone/index.ts +14 -77
- package/src/locales/default/common.ts +0 -28
- package/src/locales/default/components.ts +15 -0
- package/src/locales/default/error.ts +2 -54
- package/src/locales/default/index.ts +4 -0
- package/src/locales/default/modelProvider.ts +229 -0
- package/src/locales/default/setting.ts +51 -202
- package/src/migrations/FromV3ToV4/fixtures/azure-input-v3.json +79 -0
- package/src/migrations/FromV3ToV4/fixtures/azure-output-v4.json +75 -0
- package/src/migrations/FromV3ToV4/fixtures/ollama-input-v3.json +85 -0
- package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +86 -0
- package/src/migrations/FromV3ToV4/fixtures/openai-input-v3.json +77 -0
- package/src/migrations/FromV3ToV4/fixtures/openai-output-v4.json +79 -0
- package/src/migrations/FromV3ToV4/fixtures/openrouter-input-v3.json +82 -0
- package/src/migrations/FromV3ToV4/fixtures/openrouter-output-v4.json +89 -0
- package/src/migrations/FromV3ToV4/fixtures/output-v4-from-v1.json +203 -0
- package/src/migrations/FromV3ToV4/index.ts +96 -0
- package/src/migrations/FromV3ToV4/migrations.test.ts +195 -0
- package/src/migrations/FromV3ToV4/types/v3.ts +59 -0
- package/src/migrations/FromV3ToV4/types/v4.ts +37 -0
- package/src/migrations/index.ts +11 -3
- package/src/services/_auth.test.ts +4 -6
- package/src/services/_auth.ts +8 -60
- package/src/services/_header.ts +5 -22
- package/src/services/_url.ts +1 -0
- package/src/services/chat.ts +16 -6
- package/src/services/global.ts +1 -1
- package/src/services/models.ts +23 -0
- package/src/services/ollama.ts +2 -2
- package/src/store/chat/slices/share/action.test.ts +113 -0
- package/src/store/chat/slices/share/action.ts +1 -1
- package/src/store/global/slices/common/action.test.ts +166 -1
- package/src/store/global/slices/common/action.ts +2 -1
- package/src/store/global/slices/settings/{action.test.ts → actions/general.test.ts} +1 -19
- package/src/store/global/slices/settings/{action.ts → actions/general.ts} +4 -19
- package/src/store/global/slices/settings/actions/index.ts +18 -0
- package/src/store/global/slices/settings/actions/llm.test.ts +60 -0
- package/src/store/global/slices/settings/actions/llm.ts +88 -0
- package/src/store/global/slices/settings/initialState.ts +3 -1
- package/src/store/global/slices/settings/reducers/customModelCard.test.ts +204 -0
- package/src/store/global/slices/settings/reducers/customModelCard.ts +64 -0
- package/src/store/global/slices/settings/selectors/index.ts +1 -0
- package/src/store/global/slices/settings/selectors/modelConfig.test.ts +189 -0
- package/src/store/global/slices/settings/selectors/modelConfig.ts +179 -0
- package/src/store/global/slices/settings/selectors/modelProvider.test.ts +47 -138
- package/src/store/global/slices/settings/selectors/modelProvider.ts +102 -243
- package/src/store/global/store.ts +1 -1
- package/src/types/llm.ts +12 -1
- package/src/types/serverConfig.ts +22 -0
- package/src/types/settings/index.ts +0 -12
- package/src/types/settings/modelProvider.ts +34 -90
- package/src/utils/client/switchLang.test.ts +34 -0
- package/src/utils/difference.test.ts +46 -0
- package/src/utils/difference.ts +15 -2
- package/src/utils/fetch.ts +1 -3
- package/src/utils/parseModels.ts +62 -0
- package/vercel.json +1 -1
- package/docs/package.json +0 -5
- package/src/features/Conversation/Error/APIKeyForm/Anthropic.tsx +0 -40
- package/src/features/Conversation/Error/APIKeyForm/Google.tsx +0 -61
- package/src/features/Conversation/Error/APIKeyForm/Groq.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/Mistral.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/Moonshot.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/OpenAI.tsx +0 -63
- package/src/features/Conversation/Error/APIKeyForm/OpenRouter.tsx +0 -40
- package/src/features/Conversation/Error/APIKeyForm/Perplexity.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/TogetherAI.tsx +0 -40
- package/src/features/Conversation/Error/APIKeyForm/ZeroOne.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/Zhipu.tsx +0 -62
- package/src/libs/agent-runtime/utils/env.ts +0 -1
- package/src/store/global/slices/settings/selectors/__snapshots__/modelProvider.test.ts.snap +0 -230
- /package/src/app/api/chat/{auth.ts → auth/utils.ts} +0 -0
- /package/src/components/{AntdStaticMethods.tsx → AntdStaticMethods/index.tsx} +0 -0
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
import { OpenAIStream, StreamingTextResponse } from 'ai';
|
|
2
|
+
import OpenAI, { ClientOptions } from 'openai';
|
|
3
|
+
|
|
4
|
+
import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
|
|
5
|
+
import { ChatModelCard } from '@/types/llm';
|
|
6
|
+
|
|
7
|
+
import { LobeRuntimeAI } from '../../BaseAI';
|
|
8
|
+
import { ILobeAgentRuntimeErrorType } from '../../error';
|
|
9
|
+
import { ChatCompetitionOptions, ChatStreamPayload } from '../../types';
|
|
10
|
+
import { AgentRuntimeError } from '../createError';
|
|
11
|
+
import { debugStream } from '../debugStream';
|
|
12
|
+
import { desensitizeUrl } from '../desensitizeUrl';
|
|
13
|
+
import { handleOpenAIError } from '../handleOpenAIError';
|
|
14
|
+
|
|
15
|
+
// the model contains the following keywords is not a chat model, so we should filter them out
|
|
16
|
+
const CHAT_MODELS_BLOCK_LIST = [
|
|
17
|
+
'embedding',
|
|
18
|
+
'davinci',
|
|
19
|
+
'cuire',
|
|
20
|
+
'moderation',
|
|
21
|
+
'ada',
|
|
22
|
+
'babbage',
|
|
23
|
+
'tts',
|
|
24
|
+
'whisper',
|
|
25
|
+
'dall-e',
|
|
26
|
+
];
|
|
27
|
+
|
|
28
|
+
/**
 * Configuration accepted by {@link LobeOpenAICompatibleFactory} to build a
 * provider-specific runtime class on top of the OpenAI-compatible client.
 */
interface OpenAICompatibleFactoryOptions {
  /** Default base URL for the provider's API; overridable per-instance via ClientOptions. */
  baseURL?: string;
  chatCompletion?: {
    /** Optional hook to transform the chat payload before it is sent to the provider. */
    handlePayload?: (payload: ChatStreamPayload) => OpenAI.ChatCompletionCreateParamsStreaming;
  };
  /** Extra options forwarded to the underlying OpenAI client constructor. */
  constructorOptions?: ClientOptions;
  debug?: {
    /** When this returns true, the response stream is also dumped for debugging. */
    chatCompletion: () => boolean;
  };
  /** Provider-specific error types used when raising runtime errors. */
  errorType: {
    bizError: ILobeAgentRuntimeErrorType;
    invalidAPIKey: ILobeAgentRuntimeErrorType;
  };
  models?: {
    /** Optional mapper from a raw OpenAI model entry to a ChatModelCard. */
    transformModel?: (model: OpenAI.Model) => ChatModelCard;
  };
  /** Provider identifier attached to errors (e.g. 'zeroone', 'moonshot'). */
  provider: string;
}
|
|
46
|
+
|
|
47
|
+
/**
 * Factory that produces a LobeRuntimeAI class for any OpenAI-compatible
 * provider: it wires up client construction, streaming chat completions,
 * error normalization, and model listing from a single options object.
 */
export const LobeOpenAICompatibleFactory = ({
  provider,
  baseURL: DEFAULT_BASE_URL,
  errorType: ErrorType,
  debug,
  constructorOptions,
  chatCompletion,
  models,
}: OpenAICompatibleFactoryOptions) =>
  class LobeOpenAICompatibleAI implements LobeRuntimeAI {
    client: OpenAI;

    baseURL: string;

    // Throws invalidAPIKey immediately when no key is supplied; otherwise the
    // instance's effective baseURL is whatever the OpenAI client resolved.
    constructor({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
      if (!apiKey) throw AgentRuntimeError.createError(ErrorType.invalidAPIKey);

      this.client = new OpenAI({ apiKey, baseURL, ...constructorOptions, ...res });
      this.baseURL = this.client.baseURL;
    }

    /**
     * Streams a chat completion and returns it as a StreamingTextResponse.
     * Errors are normalized into AgentRuntimeError.chat with the endpoint
     * desensitized when a custom baseURL is in use.
     */
    async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
      try {
        // Allow the provider to reshape the payload; otherwise pass it through.
        const postPayload = chatCompletion?.handlePayload
          ? chatCompletion.handlePayload(payload)
          : (payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming);

        const response = await this.client.chat.completions.create(postPayload, {
          // https://github.com/lobehub/lobe-chat/pull/318
          headers: { Accept: '*/*' },
        });

        // Split the stream: one branch for the caller, one for optional debug dumping.
        const [prod, useForDebug] = response.tee();

        if (debug?.chatCompletion?.()) {
          debugStream(useForDebug.toReadableStream()).catch(console.error);
        }

        return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
          headers: options?.headers,
        });
      } catch (error) {
        let desensitizedEndpoint = this.baseURL;

        // refs: https://github.com/lobehub/lobe-chat/issues/842
        // Only mask the endpoint when the user configured a non-default baseURL.
        if (this.baseURL !== DEFAULT_BASE_URL) {
          desensitizedEndpoint = desensitizeUrl(this.baseURL);
        }

        // HTTP 401 maps to the provider's invalid-API-key error; other statuses
        // fall through to generic OpenAI error handling below.
        if ('status' in (error as any)) {
          switch ((error as Response).status) {
            case 401: {
              throw AgentRuntimeError.chat({
                endpoint: desensitizedEndpoint,
                error: error as any,
                errorType: ErrorType.invalidAPIKey,
                provider: provider as any,
              });
            }

            default: {
              break;
            }
          }
        }

        const { errorResult, RuntimeError } = handleOpenAIError(error);

        throw AgentRuntimeError.chat({
          endpoint: desensitizedEndpoint,
          error: errorResult,
          errorType: RuntimeError || ErrorType.bizError,
          provider: provider as any,
        });
      }
    }

    /**
     * Lists the provider's chat models: fetches all models, drops ids matching
     * CHAT_MODELS_BLOCK_LIST, then maps each either through the provider's
     * transformModel hook, a known entry from LOBE_DEFAULT_MODEL_LIST, or a
     * bare `{ id }` card as a fallback.
     */
    async models() {
      const list = await this.client.models.list();

      return list.data
        .filter((model) => {
          return CHAT_MODELS_BLOCK_LIST.every(
            (keyword) => !model.id.toLowerCase().includes(keyword),
          );
        })
        .map((item) => {
          if (models?.transformModel) {
            return models.transformModel(item);
          }

          const knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === item.id);

          if (knownModel) return knownModel;

          return { id: item.id };
        })

        .filter(Boolean) as ChatModelCard[];
    }
  };
|
|
@@ -0,0 +1,350 @@
|
|
|
1
|
+
// @vitest-environment node
|
|
2
|
+
import OpenAI from 'openai';
|
|
3
|
+
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
|
4
|
+
|
|
5
|
+
import { ChatStreamCallbacks, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
|
|
6
|
+
|
|
7
|
+
import * as debugStreamModule from '../utils/debugStream';
|
|
8
|
+
import { LobeZeroOneAI } from './index';
|
|
9
|
+
|
|
10
|
+
// Provider-specific constants under test for the ZeroOne (01.AI) runtime.
const provider = 'zeroone';
const defaultBaseURL = 'https://api.lingyiwanwu.com/v1';
const bizErrorType = 'ZeroOneBizError';
const invalidErrorType = 'InvalidZeroOneAPIKey';

// Mock the console.error to avoid polluting test output
vi.spyOn(console, 'error').mockImplementation(() => {});

// Shared runtime instance, re-created before every test.
let instance: LobeOpenAICompatibleRuntime;

beforeEach(() => {
  instance = new LobeZeroOneAI({ apiKey: 'test' });

  // Use vi.spyOn to mock the chat.completions.create method so no network call is made.
  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
    new ReadableStream() as any,
  );
});

afterEach(() => {
  // Reset all mock call history/implementations between tests.
  vi.clearAllMocks();
});
|
|
32
|
+
|
|
33
|
+
describe('LobeZeroOneAI', () => {
|
|
34
|
+
describe('init', () => {
|
|
35
|
+
it('should correctly initialize with an API key', async () => {
|
|
36
|
+
const instance = new LobeZeroOneAI({ apiKey: 'test_api_key' });
|
|
37
|
+
expect(instance).toBeInstanceOf(LobeZeroOneAI);
|
|
38
|
+
expect(instance.baseURL).toEqual(defaultBaseURL);
|
|
39
|
+
});
|
|
40
|
+
});
|
|
41
|
+
|
|
42
|
+
describe('chat', () => {
|
|
43
|
+
it('should return a StreamingTextResponse on successful API call', async () => {
|
|
44
|
+
// Arrange
|
|
45
|
+
const mockStream = new ReadableStream();
|
|
46
|
+
const mockResponse = Promise.resolve(mockStream);
|
|
47
|
+
|
|
48
|
+
(instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
|
|
49
|
+
|
|
50
|
+
// Act
|
|
51
|
+
const result = await instance.chat({
|
|
52
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
53
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
54
|
+
temperature: 0,
|
|
55
|
+
});
|
|
56
|
+
|
|
57
|
+
// Assert
|
|
58
|
+
expect(result).toBeInstanceOf(Response);
|
|
59
|
+
});
|
|
60
|
+
|
|
61
|
+
it('should call ZeroOne API with corresponding options', async () => {
|
|
62
|
+
// Arrange
|
|
63
|
+
const mockStream = new ReadableStream();
|
|
64
|
+
const mockResponse = Promise.resolve(mockStream);
|
|
65
|
+
|
|
66
|
+
(instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
|
|
67
|
+
|
|
68
|
+
// Act
|
|
69
|
+
const result = await instance.chat({
|
|
70
|
+
max_tokens: 1024,
|
|
71
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
72
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
73
|
+
temperature: 0.7,
|
|
74
|
+
top_p: 1,
|
|
75
|
+
});
|
|
76
|
+
|
|
77
|
+
// Assert
|
|
78
|
+
expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
|
|
79
|
+
{
|
|
80
|
+
max_tokens: 1024,
|
|
81
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
82
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
83
|
+
temperature: 0.7,
|
|
84
|
+
top_p: 1,
|
|
85
|
+
},
|
|
86
|
+
{ headers: { Accept: '*/*' } },
|
|
87
|
+
);
|
|
88
|
+
expect(result).toBeInstanceOf(Response);
|
|
89
|
+
});
|
|
90
|
+
|
|
91
|
+
describe('Error', () => {
|
|
92
|
+
it('should return OpenRouterBizError with an openai error response when OpenAI.APIError is thrown', async () => {
|
|
93
|
+
// Arrange
|
|
94
|
+
const apiError = new OpenAI.APIError(
|
|
95
|
+
400,
|
|
96
|
+
{
|
|
97
|
+
status: 400,
|
|
98
|
+
error: {
|
|
99
|
+
message: 'Bad Request',
|
|
100
|
+
},
|
|
101
|
+
},
|
|
102
|
+
'Error message',
|
|
103
|
+
{},
|
|
104
|
+
);
|
|
105
|
+
|
|
106
|
+
vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
|
|
107
|
+
|
|
108
|
+
// Act
|
|
109
|
+
try {
|
|
110
|
+
await instance.chat({
|
|
111
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
112
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
113
|
+
temperature: 0,
|
|
114
|
+
});
|
|
115
|
+
} catch (e) {
|
|
116
|
+
expect(e).toEqual({
|
|
117
|
+
endpoint: defaultBaseURL,
|
|
118
|
+
error: {
|
|
119
|
+
error: { message: 'Bad Request' },
|
|
120
|
+
status: 400,
|
|
121
|
+
},
|
|
122
|
+
errorType: bizErrorType,
|
|
123
|
+
provider,
|
|
124
|
+
});
|
|
125
|
+
}
|
|
126
|
+
});
|
|
127
|
+
|
|
128
|
+
it('should throw AgentRuntimeError with InvalidOpenRouterAPIKey if no apiKey is provided', async () => {
|
|
129
|
+
try {
|
|
130
|
+
new LobeZeroOneAI({});
|
|
131
|
+
} catch (e) {
|
|
132
|
+
expect(e).toEqual({ errorType: invalidErrorType });
|
|
133
|
+
}
|
|
134
|
+
});
|
|
135
|
+
|
|
136
|
+
it('should return OpenRouterBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
|
|
137
|
+
// Arrange
|
|
138
|
+
const errorInfo = {
|
|
139
|
+
stack: 'abc',
|
|
140
|
+
cause: {
|
|
141
|
+
message: 'api is undefined',
|
|
142
|
+
},
|
|
143
|
+
};
|
|
144
|
+
const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
|
|
145
|
+
|
|
146
|
+
vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
|
|
147
|
+
|
|
148
|
+
// Act
|
|
149
|
+
try {
|
|
150
|
+
await instance.chat({
|
|
151
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
152
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
153
|
+
temperature: 0,
|
|
154
|
+
});
|
|
155
|
+
} catch (e) {
|
|
156
|
+
expect(e).toEqual({
|
|
157
|
+
endpoint: defaultBaseURL,
|
|
158
|
+
error: {
|
|
159
|
+
cause: { message: 'api is undefined' },
|
|
160
|
+
stack: 'abc',
|
|
161
|
+
},
|
|
162
|
+
errorType: bizErrorType,
|
|
163
|
+
provider,
|
|
164
|
+
});
|
|
165
|
+
}
|
|
166
|
+
});
|
|
167
|
+
|
|
168
|
+
it('should return OpenRouterBizError with an cause response with desensitize Url', async () => {
|
|
169
|
+
// Arrange
|
|
170
|
+
const errorInfo = {
|
|
171
|
+
stack: 'abc',
|
|
172
|
+
cause: { message: 'api is undefined' },
|
|
173
|
+
};
|
|
174
|
+
const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
|
|
175
|
+
|
|
176
|
+
instance = new LobeZeroOneAI({
|
|
177
|
+
apiKey: 'test',
|
|
178
|
+
|
|
179
|
+
baseURL: 'https://api.abc.com/v1',
|
|
180
|
+
});
|
|
181
|
+
|
|
182
|
+
vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
|
|
183
|
+
|
|
184
|
+
// Act
|
|
185
|
+
try {
|
|
186
|
+
await instance.chat({
|
|
187
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
188
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
189
|
+
temperature: 0,
|
|
190
|
+
});
|
|
191
|
+
} catch (e) {
|
|
192
|
+
expect(e).toEqual({
|
|
193
|
+
endpoint: 'https://api.***.com/v1',
|
|
194
|
+
error: {
|
|
195
|
+
cause: { message: 'api is undefined' },
|
|
196
|
+
stack: 'abc',
|
|
197
|
+
},
|
|
198
|
+
errorType: bizErrorType,
|
|
199
|
+
provider,
|
|
200
|
+
});
|
|
201
|
+
}
|
|
202
|
+
});
|
|
203
|
+
|
|
204
|
+
it('should throw an InvalidOpenRouterAPIKey error type on 401 status code', async () => {
|
|
205
|
+
// Mock the API call to simulate a 401 error
|
|
206
|
+
const error = new Error('Unauthorized') as any;
|
|
207
|
+
error.status = 401;
|
|
208
|
+
vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
|
|
209
|
+
|
|
210
|
+
try {
|
|
211
|
+
await instance.chat({
|
|
212
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
213
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
214
|
+
temperature: 0,
|
|
215
|
+
});
|
|
216
|
+
} catch (e) {
|
|
217
|
+
// Expect the chat method to throw an error with InvalidMoonshotAPIKey
|
|
218
|
+
expect(e).toEqual({
|
|
219
|
+
endpoint: defaultBaseURL,
|
|
220
|
+
error: new Error('Unauthorized'),
|
|
221
|
+
errorType: invalidErrorType,
|
|
222
|
+
provider,
|
|
223
|
+
});
|
|
224
|
+
}
|
|
225
|
+
});
|
|
226
|
+
|
|
227
|
+
it('should return AgentRuntimeError for non-OpenAI errors', async () => {
|
|
228
|
+
// Arrange
|
|
229
|
+
const genericError = new Error('Generic Error');
|
|
230
|
+
|
|
231
|
+
vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
|
|
232
|
+
|
|
233
|
+
// Act
|
|
234
|
+
try {
|
|
235
|
+
await instance.chat({
|
|
236
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
237
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
238
|
+
temperature: 0,
|
|
239
|
+
});
|
|
240
|
+
} catch (e) {
|
|
241
|
+
expect(e).toEqual({
|
|
242
|
+
endpoint: defaultBaseURL,
|
|
243
|
+
errorType: 'AgentRuntimeError',
|
|
244
|
+
provider,
|
|
245
|
+
error: {
|
|
246
|
+
name: genericError.name,
|
|
247
|
+
cause: genericError.cause,
|
|
248
|
+
message: genericError.message,
|
|
249
|
+
stack: genericError.stack,
|
|
250
|
+
},
|
|
251
|
+
});
|
|
252
|
+
}
|
|
253
|
+
});
|
|
254
|
+
});
|
|
255
|
+
|
|
256
|
+
describe('LobeZeroOneAI chat with callback and headers', () => {
|
|
257
|
+
it('should handle callback and headers correctly', async () => {
|
|
258
|
+
// 模拟 chat.completions.create 方法返回一个可读流
|
|
259
|
+
const mockCreateMethod = vi
|
|
260
|
+
.spyOn(instance['client'].chat.completions, 'create')
|
|
261
|
+
.mockResolvedValue(
|
|
262
|
+
new ReadableStream({
|
|
263
|
+
start(controller) {
|
|
264
|
+
controller.enqueue({
|
|
265
|
+
id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
|
|
266
|
+
object: 'chat.completion.chunk',
|
|
267
|
+
created: 1709125675,
|
|
268
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
269
|
+
system_fingerprint: 'fp_86156a94a0',
|
|
270
|
+
choices: [
|
|
271
|
+
{ index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
|
|
272
|
+
],
|
|
273
|
+
});
|
|
274
|
+
controller.close();
|
|
275
|
+
},
|
|
276
|
+
}) as any,
|
|
277
|
+
);
|
|
278
|
+
|
|
279
|
+
// 准备 callback 和 headers
|
|
280
|
+
const mockCallback: ChatStreamCallbacks = {
|
|
281
|
+
onStart: vi.fn(),
|
|
282
|
+
onToken: vi.fn(),
|
|
283
|
+
};
|
|
284
|
+
const mockHeaders = { 'Custom-Header': 'TestValue' };
|
|
285
|
+
|
|
286
|
+
// 执行测试
|
|
287
|
+
const result = await instance.chat(
|
|
288
|
+
{
|
|
289
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
290
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
291
|
+
temperature: 0,
|
|
292
|
+
},
|
|
293
|
+
{ callback: mockCallback, headers: mockHeaders },
|
|
294
|
+
);
|
|
295
|
+
|
|
296
|
+
// 验证 callback 被调用
|
|
297
|
+
await result.text(); // 确保流被消费
|
|
298
|
+
expect(mockCallback.onStart).toHaveBeenCalled();
|
|
299
|
+
expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
|
|
300
|
+
|
|
301
|
+
// 验证 headers 被正确传递
|
|
302
|
+
expect(result.headers.get('Custom-Header')).toEqual('TestValue');
|
|
303
|
+
|
|
304
|
+
// 清理
|
|
305
|
+
mockCreateMethod.mockRestore();
|
|
306
|
+
});
|
|
307
|
+
});
|
|
308
|
+
|
|
309
|
+
describe('DEBUG', () => {
|
|
310
|
+
it('should call debugStream and return StreamingTextResponse when DEBUG_ZEROONE_CHAT_COMPLETION is 1', async () => {
|
|
311
|
+
// Arrange
|
|
312
|
+
const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
|
|
313
|
+
const mockDebugStream = new ReadableStream({
|
|
314
|
+
start(controller) {
|
|
315
|
+
controller.enqueue('Debug stream content');
|
|
316
|
+
controller.close();
|
|
317
|
+
},
|
|
318
|
+
}) as any;
|
|
319
|
+
mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
|
|
320
|
+
|
|
321
|
+
// 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
|
|
322
|
+
(instance['client'].chat.completions.create as Mock).mockResolvedValue({
|
|
323
|
+
tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
|
|
324
|
+
});
|
|
325
|
+
|
|
326
|
+
// 保存原始环境变量值
|
|
327
|
+
const originalDebugValue = process.env.DEBUG_ZEROONE_CHAT_COMPLETION;
|
|
328
|
+
|
|
329
|
+
// 模拟环境变量
|
|
330
|
+
process.env.DEBUG_ZEROONE_CHAT_COMPLETION = '1';
|
|
331
|
+
vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
|
|
332
|
+
|
|
333
|
+
// 执行测试
|
|
334
|
+
// 运行你的测试函数,确保它会在条件满足时调用 debugStream
|
|
335
|
+
// 假设的测试函数调用,你可能需要根据实际情况调整
|
|
336
|
+
await instance.chat({
|
|
337
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
338
|
+
model: 'mistralai/mistral-7b-instruct:free',
|
|
339
|
+
temperature: 0,
|
|
340
|
+
});
|
|
341
|
+
|
|
342
|
+
// 验证 debugStream 被调用
|
|
343
|
+
expect(debugStreamModule.debugStream).toHaveBeenCalled();
|
|
344
|
+
|
|
345
|
+
// 恢复原始环境变量值
|
|
346
|
+
process.env.DEBUG_ZEROONE_CHAT_COMPLETION = originalDebugValue;
|
|
347
|
+
});
|
|
348
|
+
});
|
|
349
|
+
});
|
|
350
|
+
});
|
|
@@ -1,78 +1,15 @@
|
|
|
1
|
-
import { OpenAIStream, StreamingTextResponse } from 'ai';
|
|
2
|
-
import OpenAI, { ClientOptions } from 'openai';
|
|
3
|
-
|
|
4
|
-
import { LobeRuntimeAI } from '../BaseAI';
|
|
5
1
|
import { AgentRuntimeErrorType } from '../error';
|
|
6
|
-
import {
|
|
7
|
-
import {
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidZeroOneAPIKey);
|
|
21
|
-
|
|
22
|
-
this.client = new OpenAI({ apiKey, baseURL, ...res });
|
|
23
|
-
this.baseURL = this.client.baseURL;
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
|
|
27
|
-
try {
|
|
28
|
-
const response = await this.client.chat.completions.create(
|
|
29
|
-
payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
|
|
30
|
-
);
|
|
31
|
-
const [prod, debug] = response.tee();
|
|
32
|
-
|
|
33
|
-
if (process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1') {
|
|
34
|
-
debugStream(debug.toReadableStream()).catch(console.error);
|
|
35
|
-
}
|
|
36
|
-
|
|
37
|
-
return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
|
|
38
|
-
headers: options?.headers,
|
|
39
|
-
});
|
|
40
|
-
} catch (error) {
|
|
41
|
-
let desensitizedEndpoint = this.baseURL;
|
|
42
|
-
|
|
43
|
-
if (this.baseURL !== DEFAULT_BASE_URL) {
|
|
44
|
-
desensitizedEndpoint = desensitizeUrl(this.baseURL);
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
if ('status' in (error as any)) {
|
|
48
|
-
switch ((error as Response).status) {
|
|
49
|
-
case 401: {
|
|
50
|
-
throw AgentRuntimeError.chat({
|
|
51
|
-
endpoint: desensitizedEndpoint,
|
|
52
|
-
error: error as any,
|
|
53
|
-
errorType: AgentRuntimeErrorType.InvalidZeroOneAPIKey,
|
|
54
|
-
provider: ModelProvider.ZeroOne,
|
|
55
|
-
});
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
default: {
|
|
59
|
-
break;
|
|
60
|
-
}
|
|
61
|
-
}
|
|
62
|
-
}
|
|
63
|
-
|
|
64
|
-
const { errorResult, RuntimeError } = handleOpenAIError(error);
|
|
65
|
-
|
|
66
|
-
const errorType = RuntimeError || AgentRuntimeErrorType.ZeroOneBizError;
|
|
67
|
-
|
|
68
|
-
throw AgentRuntimeError.chat({
|
|
69
|
-
endpoint: desensitizedEndpoint,
|
|
70
|
-
error: errorResult,
|
|
71
|
-
errorType,
|
|
72
|
-
provider: ModelProvider.ZeroOne,
|
|
73
|
-
});
|
|
74
|
-
}
|
|
75
|
-
}
|
|
76
|
-
}
|
|
77
|
-
|
|
78
|
-
export default LobeZeroOneAI;
|
|
2
|
+
import { ModelProvider } from '../types';
|
|
3
|
+
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
|
|
4
|
+
|
|
5
|
+
export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
|
|
6
|
+
baseURL: 'https://api.lingyiwanwu.com/v1',
|
|
7
|
+
debug: {
|
|
8
|
+
chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
|
|
9
|
+
},
|
|
10
|
+
errorType: {
|
|
11
|
+
bizError: AgentRuntimeErrorType.ZeroOneBizError,
|
|
12
|
+
invalidAPIKey: AgentRuntimeErrorType.InvalidZeroOneAPIKey,
|
|
13
|
+
},
|
|
14
|
+
provider: ModelProvider.ZeroOne,
|
|
15
|
+
});
|
|
@@ -1,18 +1,7 @@
|
|
|
1
1
|
export default {
|
|
2
|
-
ModelSelect: {
|
|
3
|
-
featureTag: {
|
|
4
|
-
custom: '自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性',
|
|
5
|
-
file: '该模型支持上传文件读取与识别',
|
|
6
|
-
functionCall: '该模型支持函数调用(Function Call)',
|
|
7
|
-
tokens: '该模型单个会话最多支持 {{tokens}} Tokens',
|
|
8
|
-
vision: '该模型支持视觉识别',
|
|
9
|
-
},
|
|
10
|
-
},
|
|
11
2
|
about: '关于',
|
|
12
3
|
advanceSettings: '高级设置',
|
|
13
|
-
|
|
14
4
|
appInitializing: 'LobeChat 启动中,请耐心等待...',
|
|
15
|
-
|
|
16
5
|
autoGenerate: '自动补全',
|
|
17
6
|
autoGenerateTooltip: '基于提示词自动补全助手描述',
|
|
18
7
|
cancel: '取消',
|
|
@@ -97,23 +86,6 @@ export default {
|
|
|
97
86
|
'zh-TW': '繁体中文',
|
|
98
87
|
},
|
|
99
88
|
layoutInitializing: '正在加载布局...',
|
|
100
|
-
modelProvider: {
|
|
101
|
-
anthropic: 'Anthropic',
|
|
102
|
-
azure: 'Azure',
|
|
103
|
-
bedrock: 'AWS Bedrock',
|
|
104
|
-
google: 'Google',
|
|
105
|
-
groq: 'Groq',
|
|
106
|
-
mistral: 'Mistral AI',
|
|
107
|
-
moonshot: 'Moonshot AI',
|
|
108
|
-
ollama: 'Ollama',
|
|
109
|
-
oneapi: 'One API',
|
|
110
|
-
openai: 'OpenAI',
|
|
111
|
-
openrouter: 'OpenRouter',
|
|
112
|
-
perplexity: 'Perplexity',
|
|
113
|
-
togetherai: 'TogetherAI',
|
|
114
|
-
zeroone: '01.AI 零一万物',
|
|
115
|
-
zhipu: '智谱AI',
|
|
116
|
-
},
|
|
117
89
|
noDescription: '暂无描述',
|
|
118
90
|
oauth: 'SSO 登录',
|
|
119
91
|
ok: '确定',
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
export default {
|
|
2
|
+
ModelSelect: {
|
|
3
|
+
featureTag: {
|
|
4
|
+
custom: '自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性',
|
|
5
|
+
file: '该模型支持上传文件读取与识别',
|
|
6
|
+
functionCall: '该模型支持函数调用(Function Call)',
|
|
7
|
+
tokens: '该模型单个会话最多支持 {{tokens}} Tokens',
|
|
8
|
+
vision: '该模型支持视觉识别',
|
|
9
|
+
},
|
|
10
|
+
},
|
|
11
|
+
ModelSwitchPanel: {
|
|
12
|
+
emptyModel: '没有启用的模型,请前往设置开启',
|
|
13
|
+
provider: '提供商',
|
|
14
|
+
},
|
|
15
|
+
};
|