@lobehub/chat 0.146.1 → 0.147.0
This diff compares the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
- package/.env.example +3 -5
- package/.github/workflows/issue-close-require.yml +1 -1
- package/.github/workflows/release.yml +1 -1
- package/.github/workflows/test.yml +3 -1
- package/.i18nrc.js +13 -8
- package/.seorc.cjs +9 -0
- package/CHANGELOG.md +98 -0
- package/README.md +25 -25
- package/README.zh-CN.md +25 -25
- package/contributing/Home.md +1 -1
- package/docs/self-hosting/advanced/analytics.mdx +12 -0
- package/docs/self-hosting/advanced/analytics.zh-CN.mdx +10 -0
- package/docs/self-hosting/advanced/authentication.mdx +19 -0
- package/docs/self-hosting/advanced/authentication.zh-CN.mdx +15 -0
- package/docs/self-hosting/advanced/sso-providers/auth0.mdx +19 -2
- package/docs/self-hosting/advanced/sso-providers/auth0.zh-CN.mdx +15 -2
- package/docs/self-hosting/advanced/sso-providers/authentik.mdx +18 -3
- package/docs/self-hosting/advanced/sso-providers/authentik.zh-CN.mdx +14 -2
- package/docs/self-hosting/advanced/sso-providers/github.mdx +13 -0
- package/docs/self-hosting/advanced/sso-providers/github.zh-CN.mdx +13 -3
- package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.mdx +18 -2
- package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.zh-CN.mdx +15 -2
- package/docs/self-hosting/advanced/sso-providers/zitadel.mdx +17 -2
- package/docs/self-hosting/advanced/sso-providers/zitadel.zh-CN.mdx +14 -2
- package/docs/self-hosting/advanced/upstream-sync.mdx +13 -0
- package/docs/self-hosting/advanced/upstream-sync.zh-CN.mdx +10 -0
- package/docs/self-hosting/environment-variables/analytics.mdx +15 -0
- package/docs/self-hosting/environment-variables/analytics.zh-CN.mdx +13 -0
- package/docs/self-hosting/environment-variables/auth.mdx +14 -0
- package/docs/self-hosting/environment-variables/auth.zh-CN.mdx +15 -1
- package/docs/self-hosting/environment-variables/basic.mdx +15 -0
- package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +11 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +26 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +10 -0
- package/docs/self-hosting/environment-variables.mdx +13 -2
- package/docs/self-hosting/environment-variables.zh-CN.mdx +9 -0
- package/docs/self-hosting/examples/azure-openai.mdx +12 -0
- package/docs/self-hosting/examples/azure-openai.zh-CN.mdx +12 -0
- package/docs/self-hosting/examples/ollama.mdx +13 -0
- package/docs/self-hosting/examples/ollama.zh-CN.mdx +11 -0
- package/docs/self-hosting/faq/no-v1-suffix.mdx +12 -0
- package/docs/self-hosting/faq/no-v1-suffix.zh-CN.mdx +9 -0
- package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.mdx +14 -0
- package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.zh-CN.mdx +11 -0
- package/docs/self-hosting/platform/docker-compose.mdx +21 -5
- package/docs/self-hosting/platform/docker-compose.zh-CN.mdx +18 -5
- package/docs/self-hosting/platform/docker.mdx +26 -8
- package/docs/self-hosting/platform/docker.zh-CN.mdx +27 -9
- package/docs/self-hosting/platform/netlify.mdx +18 -2
- package/docs/self-hosting/platform/netlify.zh-CN.mdx +14 -1
- package/docs/self-hosting/platform/railway.mdx +12 -0
- package/docs/self-hosting/platform/railway.zh-CN.mdx +11 -0
- package/docs/self-hosting/platform/repocloud.mdx +12 -0
- package/docs/self-hosting/platform/repocloud.zh-CN.mdx +10 -0
- package/docs/self-hosting/platform/sealos.mdx +11 -0
- package/docs/self-hosting/platform/sealos.zh-CN.mdx +10 -0
- package/docs/self-hosting/platform/vercel.mdx +12 -0
- package/docs/self-hosting/platform/vercel.zh-CN.mdx +11 -0
- package/docs/self-hosting/platform/zeabur.mdx +11 -0
- package/docs/self-hosting/platform/zeabur.zh-CN.mdx +10 -0
- package/docs/self-hosting/start.mdx +12 -0
- package/docs/self-hosting/start.zh-CN.mdx +13 -0
- package/docs/usage/agents/concepts.mdx +13 -0
- package/docs/usage/agents/concepts.zh-CN.mdx +10 -0
- package/docs/usage/agents/custom-agent.mdx +16 -2
- package/docs/usage/agents/custom-agent.zh-CN.mdx +14 -2
- package/docs/usage/agents/model.mdx +11 -2
- package/docs/usage/agents/model.zh-CN.mdx +12 -0
- package/docs/usage/agents/prompt.mdx +15 -0
- package/docs/usage/agents/prompt.zh-CN.mdx +10 -0
- package/docs/usage/agents/topics.mdx +13 -0
- package/docs/usage/agents/topics.zh-CN.mdx +11 -0
- package/docs/usage/features/agent-market.mdx +11 -1
- package/docs/usage/features/agent-market.zh-CN.mdx +13 -0
- package/docs/usage/features/local-llm.mdx +12 -2
- package/docs/usage/features/local-llm.zh-CN.mdx +3 -3
- package/docs/usage/features/mobile.mdx +10 -1
- package/docs/usage/features/mobile.zh-CN.mdx +11 -0
- package/docs/usage/features/more.mdx +13 -0
- package/docs/usage/features/more.zh-CN.mdx +10 -0
- package/docs/usage/features/multi-ai-providers.mdx +14 -1
- package/docs/usage/features/multi-ai-providers.zh-CN.mdx +16 -0
- package/docs/usage/features/plugin-system.mdx +12 -1
- package/docs/usage/features/plugin-system.zh-CN.mdx +9 -0
- package/docs/usage/features/pwa.mdx +11 -1
- package/docs/usage/features/pwa.zh-CN.mdx +13 -0
- package/docs/usage/features/text-to-image.mdx +11 -1
- package/docs/usage/features/text-to-image.zh-CN.mdx +13 -0
- package/docs/usage/features/theme.mdx +12 -1
- package/docs/usage/features/theme.zh-CN.mdx +11 -0
- package/docs/usage/features/tts.mdx +14 -1
- package/docs/usage/features/tts.zh-CN.mdx +12 -0
- package/docs/usage/features/vision.mdx +11 -1
- package/docs/usage/features/vision.zh-CN.mdx +10 -0
- package/docs/usage/plugins/basic-usage.mdx +12 -0
- package/docs/usage/plugins/basic-usage.zh-CN.mdx +10 -0
- package/docs/usage/plugins/custom-plugin.mdx +12 -0
- package/docs/usage/plugins/custom-plugin.zh-CN.mdx +10 -0
- package/docs/usage/plugins/development.mdx +18 -0
- package/docs/usage/plugins/development.zh-CN.mdx +12 -0
- package/docs/usage/plugins/store.mdx +12 -0
- package/docs/usage/plugins/store.zh-CN.mdx +9 -0
- package/docs/usage/providers/groq.mdx +14 -2
- package/docs/usage/providers/groq.zh-CN.mdx +12 -2
- package/docs/usage/providers/ollama/gemma.mdx +13 -3
- package/docs/usage/providers/ollama/gemma.zh-CN.mdx +12 -3
- package/docs/usage/providers/ollama/qwen.mdx +12 -4
- package/docs/usage/providers/ollama/qwen.zh-CN.mdx +10 -3
- package/docs/usage/providers/ollama.mdx +19 -9
- package/docs/usage/providers/ollama.zh-CN.mdx +20 -9
- package/docs/usage/start.mdx +13 -2
- package/docs/usage/start.zh-CN.mdx +11 -2
- package/locales/ar/common.json +0 -26
- package/locales/ar/components.json +15 -0
- package/locales/ar/error.json +1 -52
- package/locales/ar/modelProvider.json +226 -0
- package/locales/ar/setting.json +48 -199
- package/locales/bg-BG/common.json +0 -26
- package/locales/bg-BG/components.json +15 -0
- package/locales/bg-BG/error.json +1 -52
- package/locales/bg-BG/modelProvider.json +226 -0
- package/locales/bg-BG/setting.json +48 -199
- package/locales/de-DE/common.json +0 -26
- package/locales/de-DE/components.json +15 -0
- package/locales/de-DE/error.json +1 -52
- package/locales/de-DE/modelProvider.json +226 -0
- package/locales/de-DE/setting.json +48 -199
- package/locales/en-US/common.json +0 -26
- package/locales/en-US/components.json +15 -0
- package/locales/en-US/error.json +1 -52
- package/locales/en-US/modelProvider.json +226 -0
- package/locales/en-US/setting.json +48 -199
- package/locales/es-ES/common.json +0 -26
- package/locales/es-ES/components.json +15 -0
- package/locales/es-ES/error.json +1 -52
- package/locales/es-ES/modelProvider.json +226 -0
- package/locales/es-ES/setting.json +48 -199
- package/locales/fr-FR/common.json +0 -26
- package/locales/fr-FR/components.json +15 -0
- package/locales/fr-FR/error.json +1 -52
- package/locales/fr-FR/modelProvider.json +226 -0
- package/locales/fr-FR/setting.json +48 -199
- package/locales/it-IT/common.json +0 -26
- package/locales/it-IT/components.json +15 -0
- package/locales/it-IT/error.json +1 -52
- package/locales/it-IT/modelProvider.json +226 -0
- package/locales/it-IT/setting.json +59 -210
- package/locales/ja-JP/common.json +0 -26
- package/locales/ja-JP/components.json +15 -0
- package/locales/ja-JP/error.json +1 -52
- package/locales/ja-JP/modelProvider.json +226 -0
- package/locales/ja-JP/setting.json +59 -210
- package/locales/ko-KR/common.json +0 -26
- package/locales/ko-KR/components.json +15 -0
- package/locales/ko-KR/error.json +1 -52
- package/locales/ko-KR/modelProvider.json +226 -0
- package/locales/ko-KR/setting.json +48 -199
- package/locales/nl-NL/common.json +0 -26
- package/locales/nl-NL/components.json +15 -0
- package/locales/nl-NL/error.json +4 -55
- package/locales/nl-NL/modelProvider.json +226 -0
- package/locales/nl-NL/setting.json +49 -200
- package/locales/pl-PL/common.json +0 -26
- package/locales/pl-PL/components.json +15 -0
- package/locales/pl-PL/error.json +1 -52
- package/locales/pl-PL/modelProvider.json +226 -0
- package/locales/pl-PL/setting.json +48 -199
- package/locales/pt-BR/common.json +0 -26
- package/locales/pt-BR/components.json +15 -0
- package/locales/pt-BR/error.json +1 -52
- package/locales/pt-BR/modelProvider.json +226 -0
- package/locales/pt-BR/setting.json +48 -199
- package/locales/ru-RU/common.json +0 -26
- package/locales/ru-RU/components.json +15 -0
- package/locales/ru-RU/error.json +1 -52
- package/locales/ru-RU/modelProvider.json +226 -0
- package/locales/ru-RU/setting.json +48 -199
- package/locales/tr-TR/common.json +0 -26
- package/locales/tr-TR/components.json +15 -0
- package/locales/tr-TR/error.json +1 -52
- package/locales/tr-TR/modelProvider.json +226 -0
- package/locales/tr-TR/setting.json +48 -199
- package/locales/vi-VN/common.json +0 -26
- package/locales/vi-VN/components.json +15 -0
- package/locales/vi-VN/error.json +1 -52
- package/locales/vi-VN/modelProvider.json +226 -0
- package/locales/vi-VN/setting.json +48 -199
- package/locales/zh-CN/common.json +0 -26
- package/locales/zh-CN/components.json +15 -0
- package/locales/zh-CN/error.json +2 -53
- package/locales/zh-CN/modelProvider.json +226 -0
- package/locales/zh-CN/setting.json +48 -199
- package/locales/zh-TW/common.json +0 -26
- package/locales/zh-TW/components.json +15 -0
- package/locales/zh-TW/error.json +1 -52
- package/locales/zh-TW/modelProvider.json +226 -0
- package/locales/zh-TW/setting.json +48 -199
- package/package.json +121 -117
- package/scripts/mdxWorkflow/index.ts +48 -0
- package/src/app/api/chat/[provider]/route.test.ts +4 -9
- package/src/app/api/chat/[provider]/route.ts +6 -23
- package/src/app/api/chat/{[provider]/agentRuntime.test.ts → agentRuntime.test.ts} +12 -12
- package/src/app/api/chat/{[provider]/agentRuntime.ts → agentRuntime.ts} +11 -30
- package/src/app/api/chat/auth/index.ts +42 -0
- package/src/app/api/chat/models/[provider]/route.ts +45 -0
- package/src/app/api/config/__snapshots__/route.test.ts.snap +127 -0
- package/src/app/api/config/route.test.ts +170 -0
- package/src/app/api/config/route.ts +46 -11
- package/src/app/api/plugin/gateway/route.ts +1 -1
- package/src/app/settings/llm/Azure/index.tsx +36 -43
- package/src/app/settings/llm/Bedrock/index.tsx +12 -20
- package/src/app/settings/llm/Ollama/index.tsx +2 -2
- package/src/app/settings/llm/OpenAI/index.tsx +9 -121
- package/src/app/settings/llm/OpenRouter/index.tsx +1 -1
- package/src/app/settings/llm/TogetherAI/index.tsx +0 -1
- package/src/app/settings/llm/components/ProviderConfig/index.tsx +48 -32
- package/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx +94 -0
- package/src/app/settings/llm/components/ProviderModelList/MaxTokenSlider.tsx +88 -0
- package/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx +128 -0
- package/src/app/settings/llm/components/ProviderModelList/ModelFetcher.tsx +81 -0
- package/src/app/settings/llm/components/ProviderModelList/Option.tsx +38 -0
- package/src/app/settings/llm/components/ProviderModelList/index.tsx +142 -0
- package/src/app/settings/llm/const.ts +1 -1
- package/src/app/settings/llm/index.tsx +7 -6
- package/src/app/settings/tts/TTS/index.tsx +1 -1
- package/src/components/AntdStaticMethods/index.test.tsx +43 -0
- package/src/components/ModelIcon/index.tsx +25 -7
- package/src/components/ModelSelect/index.tsx +67 -56
- package/src/config/modelProviders/anthropic.ts +6 -1
- package/src/config/modelProviders/azure.ts +42 -0
- package/src/config/modelProviders/bedrock.ts +4 -1
- package/src/config/modelProviders/google.ts +2 -14
- package/src/config/modelProviders/groq.ts +3 -0
- package/src/config/modelProviders/index.ts +19 -14
- package/src/config/modelProviders/mistral.ts +5 -0
- package/src/config/modelProviders/moonshot.ts +3 -1
- package/src/config/modelProviders/ollama.ts +6 -17
- package/src/config/modelProviders/openai.ts +9 -13
- package/src/config/modelProviders/openrouter.ts +6 -1
- package/src/config/modelProviders/perplexity.ts +2 -0
- package/src/config/modelProviders/togetherai.ts +11 -0
- package/src/config/modelProviders/zeroone.ts +3 -0
- package/src/config/modelProviders/zhipu.ts +3 -0
- package/src/config/server/provider.ts +52 -7
- package/src/const/auth.ts +0 -1
- package/src/const/{settings.ts → settings/index.ts} +31 -4
- package/src/const/url.ts +1 -1
- package/src/database/core/db.ts +14 -0
- package/src/database/schemas/user.ts +1 -25
- package/src/features/AgentSetting/AgentConfig/ModelSelect.tsx +7 -11
- package/src/features/ChatInput/STT/common.tsx +80 -78
- package/src/features/ChatInput/STT/index.tsx +8 -6
- package/src/features/Conversation/Error/APIKeyForm/Bedrock.tsx +8 -8
- package/src/features/Conversation/Error/APIKeyForm/ProviderApiKeyForm.tsx +73 -0
- package/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx +74 -0
- package/src/features/Conversation/Error/APIKeyForm/index.tsx +25 -49
- package/src/features/ModelSwitchPanel/index.tsx +40 -13
- package/src/hooks/_header.ts +5 -20
- package/src/libs/agent-runtime/BaseAI.ts +17 -1
- package/src/libs/agent-runtime/anthropic/index.ts +3 -7
- package/src/libs/agent-runtime/azureOpenai/index.test.ts +166 -0
- package/src/libs/agent-runtime/azureOpenai/index.ts +16 -8
- package/src/libs/agent-runtime/bedrock/index.test.ts +199 -22
- package/src/libs/agent-runtime/bedrock/index.ts +11 -18
- package/src/libs/agent-runtime/groq/index.test.ts +350 -0
- package/src/libs/agent-runtime/groq/index.ts +14 -77
- package/src/libs/agent-runtime/mistral/index.test.ts +25 -19
- package/src/libs/agent-runtime/mistral/index.ts +24 -86
- package/src/libs/agent-runtime/moonshot/index.test.ts +2 -2
- package/src/libs/agent-runtime/moonshot/index.ts +14 -77
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +99 -0
- package/src/libs/agent-runtime/openai/fixtures/openai-models.json +170 -0
- package/src/libs/agent-runtime/openai/index.test.ts +15 -50
- package/src/libs/agent-runtime/openai/index.ts +15 -107
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +82 -0
- package/src/libs/agent-runtime/openrouter/fixtures/models.json +62 -0
- package/src/libs/agent-runtime/openrouter/index.test.ts +25 -9
- package/src/libs/agent-runtime/openrouter/index.ts +42 -84
- package/src/libs/agent-runtime/openrouter/type.ts +28 -0
- package/src/libs/agent-runtime/togetherai/index.test.ts +12 -9
- package/src/libs/agent-runtime/togetherai/index.ts +20 -85
- package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +14 -22
- package/src/libs/agent-runtime/utils/anthropicHelpers.ts +4 -10
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +147 -0
- package/src/libs/agent-runtime/zeroone/index.test.ts +350 -0
- package/src/libs/agent-runtime/zeroone/index.ts +14 -77
- package/src/locales/default/common.ts +0 -28
- package/src/locales/default/components.ts +15 -0
- package/src/locales/default/error.ts +2 -54
- package/src/locales/default/index.ts +4 -0
- package/src/locales/default/modelProvider.ts +229 -0
- package/src/locales/default/setting.ts +51 -202
- package/src/migrations/FromV3ToV4/fixtures/azure-input-v3.json +79 -0
- package/src/migrations/FromV3ToV4/fixtures/azure-output-v4.json +75 -0
- package/src/migrations/FromV3ToV4/fixtures/ollama-input-v3.json +85 -0
- package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +86 -0
- package/src/migrations/FromV3ToV4/fixtures/openai-input-v3.json +77 -0
- package/src/migrations/FromV3ToV4/fixtures/openai-output-v4.json +79 -0
- package/src/migrations/FromV3ToV4/fixtures/openrouter-input-v3.json +82 -0
- package/src/migrations/FromV3ToV4/fixtures/openrouter-output-v4.json +89 -0
- package/src/migrations/FromV3ToV4/fixtures/output-v4-from-v1.json +203 -0
- package/src/migrations/FromV3ToV4/index.ts +96 -0
- package/src/migrations/FromV3ToV4/migrations.test.ts +195 -0
- package/src/migrations/FromV3ToV4/types/v3.ts +59 -0
- package/src/migrations/FromV3ToV4/types/v4.ts +37 -0
- package/src/migrations/index.ts +11 -3
- package/src/services/_auth.test.ts +4 -6
- package/src/services/_auth.ts +8 -60
- package/src/services/_header.ts +5 -22
- package/src/services/_url.ts +1 -0
- package/src/services/chat.ts +16 -6
- package/src/services/global.ts +1 -1
- package/src/services/models.ts +23 -0
- package/src/services/ollama.ts +2 -2
- package/src/store/chat/slices/share/action.test.ts +113 -0
- package/src/store/chat/slices/share/action.ts +1 -1
- package/src/store/global/slices/common/action.test.ts +166 -1
- package/src/store/global/slices/common/action.ts +2 -1
- package/src/store/global/slices/settings/{action.test.ts → actions/general.test.ts} +1 -19
- package/src/store/global/slices/settings/{action.ts → actions/general.ts} +4 -19
- package/src/store/global/slices/settings/actions/index.ts +18 -0
- package/src/store/global/slices/settings/actions/llm.test.ts +60 -0
- package/src/store/global/slices/settings/actions/llm.ts +88 -0
- package/src/store/global/slices/settings/initialState.ts +3 -1
- package/src/store/global/slices/settings/reducers/customModelCard.test.ts +204 -0
- package/src/store/global/slices/settings/reducers/customModelCard.ts +64 -0
- package/src/store/global/slices/settings/selectors/index.ts +1 -0
- package/src/store/global/slices/settings/selectors/modelConfig.test.ts +189 -0
- package/src/store/global/slices/settings/selectors/modelConfig.ts +179 -0
- package/src/store/global/slices/settings/selectors/modelProvider.test.ts +47 -138
- package/src/store/global/slices/settings/selectors/modelProvider.ts +102 -243
- package/src/store/global/store.ts +1 -1
- package/src/types/llm.ts +12 -1
- package/src/types/serverConfig.ts +22 -0
- package/src/types/settings/index.ts +0 -12
- package/src/types/settings/modelProvider.ts +34 -90
- package/src/utils/client/switchLang.test.ts +34 -0
- package/src/utils/difference.test.ts +46 -0
- package/src/utils/difference.ts +15 -2
- package/src/utils/fetch.ts +1 -3
- package/src/utils/parseModels.ts +62 -0
- package/vercel.json +1 -1
- package/docs/package.json +0 -5
- package/src/features/Conversation/Error/APIKeyForm/Anthropic.tsx +0 -40
- package/src/features/Conversation/Error/APIKeyForm/Google.tsx +0 -61
- package/src/features/Conversation/Error/APIKeyForm/Groq.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/Mistral.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/Moonshot.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/OpenAI.tsx +0 -63
- package/src/features/Conversation/Error/APIKeyForm/OpenRouter.tsx +0 -40
- package/src/features/Conversation/Error/APIKeyForm/Perplexity.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/TogetherAI.tsx +0 -40
- package/src/features/Conversation/Error/APIKeyForm/ZeroOne.tsx +0 -60
- package/src/features/Conversation/Error/APIKeyForm/Zhipu.tsx +0 -62
- package/src/libs/agent-runtime/utils/env.ts +0 -1
- package/src/store/global/slices/settings/selectors/__snapshots__/modelProvider.test.ts.snap +0 -230
- /package/src/app/api/chat/{auth.ts → auth/utils.ts} +0 -0
- /package/src/components/{AntdStaticMethods.tsx → AntdStaticMethods/index.tsx} +0 -0
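Most of this release is plumbing for configurable model providers: new `modelProvider` locale namespaces, the `ProviderModelList` settings components, a `FromV3ToV4` settings migration, a shared `openaiCompatibleFactory`, and a new `src/utils/parseModels.ts` for turning a custom model-list string into model cards. As a rough illustration of what such a parser does — a minimal sketch assuming the `+model,-model,id=Display Name` list syntax LobeChat documents for its model environment variables; all names below are illustrative, not the package's actual exports:

```ts
// Illustrative only — a minimal model-list parser under the assumed
// "+id adds, -id removes, id=name renames" grammar.
interface CustomModelCard {
  displayName?: string;
  enabled: boolean;
  id: string;
}

const parseModelString = (modelString: string = ''): CustomModelCard[] =>
  modelString
    .split(/[,，]/) // tolerate full-width commas pasted from zh-CN docs
    .map((entry) => entry.trim())
    .filter(Boolean)
    .map((entry) => {
      const enabled = !entry.startsWith('-'); // "-id" disables a model
      const [id, displayName] = entry.replace(/^[+-]/, '').split('=');
      return { displayName, enabled, id };
    });

// parseModelString('+gpt-4,-gpt-3.5-turbo,gpt-4-32k=GPT-4 (32K)')
// → [{ id: 'gpt-4', enabled: true }, { id: 'gpt-3.5-turbo', enabled: false },
//    { id: 'gpt-4-32k', displayName: 'GPT-4 (32K)', enabled: true }]
```

The diff hunks below cover the agent-runtime files.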
package/src/libs/agent-runtime/anthropic/index.ts
@@ -6,11 +6,7 @@ import { ClientOptions } from 'openai';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
-import {
-  ChatCompetitionOptions,
-  ChatStreamPayload,
-  ModelProvider
-} from '../types';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { desensitizeUrl } from '../utils/desensitizeUrl';
@@ -20,12 +16,12 @@ const DEFAULT_BASE_URL = 'https://api.anthropic.com';
 
 export class LobeAnthropicAI implements LobeRuntimeAI {
   private client: Anthropic;
-
+
   baseURL: string;
 
   constructor({ apiKey, baseURL = DEFAULT_BASE_URL }: ClientOptions) {
     if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidAnthropicAPIKey);
-
+
     this.client = new Anthropic({ apiKey, baseURL });
     this.baseURL = this.client.baseURL;
   }
package/src/libs/agent-runtime/azureOpenai/index.test.ts
@@ -0,0 +1,166 @@
+// @vitest-environment node
+import { AzureKeyCredential, OpenAIClient } from '@azure/openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeAzureOpenAI } from './index';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+describe('LobeAzureOpenAI', () => {
+  let instance: LobeAzureOpenAI;
+
+  beforeEach(() => {
+    instance = new LobeAzureOpenAI(
+      'https://test.openai.azure.com/',
+      'test_key',
+      '2023-03-15-preview',
+    );
+
+    // Use vi.spyOn to mock the streamChatCompletions method
+    vi.spyOn(instance['client'], 'streamChatCompletions').mockResolvedValue(
+      new ReadableStream() as any,
+    );
+  });
+
+  afterEach(() => {
+    vi.clearAllMocks();
+  });
+
+  describe('constructor', () => {
+    it('should throw InvalidAzureAPIKey error when apikey or endpoint is missing', () => {
+      try {
+        new LobeAzureOpenAI();
+      } catch (e) {
+        expect(e).toEqual({ errorType: 'InvalidAzureAPIKey' });
+      }
+    });
+
+    it('should create an instance of OpenAIClient with correct parameters', () => {
+      const endpoint = 'https://test.openai.azure.com/';
+      const apikey = 'test_key';
+      const apiVersion = '2023-03-15-preview';
+
+      const instance = new LobeAzureOpenAI(endpoint, apikey, apiVersion);
+
+      expect(instance.client).toBeInstanceOf(OpenAIClient);
+      expect(instance.baseURL).toBe(endpoint);
+    });
+  });
+
+  describe('chat', () => {
+    it('should return a StreamingTextResponse on successful API call', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].streamChatCompletions as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'text-davinci-003',
+        temperature: 0,
+      });
+
+      // Assert
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    describe('Error', () => {
+      it('should return AzureBizError with DeploymentNotFound error', async () => {
+        // Arrange
+        const error = {
+          code: 'DeploymentNotFound',
+          message: 'Deployment not found',
+        };
+
+        (instance['client'].streamChatCompletions as Mock).mockRejectedValue(error);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'text-davinci-003',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Assert
+          expect(e).toEqual({
+            endpoint: 'https://test.openai.azure.com/',
+            error: {
+              code: 'DeploymentNotFound',
+              message: 'Deployment not found',
+              deployId: 'text-davinci-003',
+            },
+            errorType: 'AzureBizError',
+            provider: 'azure',
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-Azure errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        (instance['client'].streamChatCompletions as Mock).mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'text-davinci-003',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Assert
+          expect(e).toEqual({
+            endpoint: 'https://test.openai.azure.com/',
+            errorType: 'AgentRuntimeError',
+            provider: 'azure',
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream when DEBUG_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any;
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream;
+
+        (instance['client'].streamChatCompletions as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        process.env.DEBUG_AZURE_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // Act
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'text-davinci-003',
+          temperature: 0,
+        });
+
+        // Assert
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // Restore
+        delete process.env.DEBUG_AZURE_CHAT_COMPLETION;
+      });
+    });
+  });
+});
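The two Error cases above pin down a normalization contract: Azure service errors keep their `code` (with the requested deployment attached as `deployId` when the code is `DeploymentNotFound`), while plain JavaScript errors are reduced to a serializable `{ name, cause, message }`. A minimal sketch of that contract, detached from the Azure SDK — the helper name is ours, not the package's:

```ts
// Sketch of the error shape the tests assert; not the package's actual code.
const normalizeAzureError = (e: any, model: string): Record<string, unknown> => {
  if (e.code) {
    // Service errors carry a code; DeploymentNotFound also records which
    // deployment (the requested model id) was missing.
    return e.code === 'DeploymentNotFound' ? { ...e, deployId: model } : e;
  }
  // Generic errors are stripped down to fields that survive serialization.
  return { cause: e.cause, message: e.message, name: e.name };
};
```

The implementation these tests cover follows.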
package/src/libs/agent-runtime/azureOpenai/index.ts
@@ -11,16 +11,15 @@ import { AgentRuntimeErrorType } from '../error';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
-import { DEBUG_CHAT_COMPLETION } from '../utils/env';
 
 export class LobeAzureOpenAI implements LobeRuntimeAI {
-
+  client: OpenAIClient;
 
   constructor(endpoint?: string, apikey?: string, apiVersion?: string) {
     if (!apikey || !endpoint)
       throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidAzureAPIKey);
 
-    this.
+    this.client = new OpenAIClient(endpoint, new AzureKeyCredential(apikey), { apiVersion });
 
     this.baseURL = endpoint;
   }
@@ -34,7 +33,7 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
     // ============ 2. send api ============ //
 
     try {
-      const response = await this.
+      const response = await this.client.streamChatCompletions(
        model,
        messages as ChatRequestMessage[],
        params as GetChatCompletionsOptions,
@@ -45,7 +44,7 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
 
      const [debug, prod] = stream.tee();
 
-      if (
+      if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
        debugStream(debug).catch(console.error);
      }
 
@@ -53,10 +52,18 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
    } catch (e) {
      let error = e as { [key: string]: any; code: string; message: string };
 
-
-
-
+      if (error.code) {
+        switch (error.code) {
+          case 'DeploymentNotFound': {
+            error = { ...error, deployId: model };
+          }
        }
+      } else {
+        error = {
+          cause: error.cause,
+          message: error.message,
+          name: error.name,
+        } as any;
      }
 
      const errorType = error.code
@@ -64,6 +71,7 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
        : AgentRuntimeErrorType.AgentRuntimeError;
 
      throw AgentRuntimeError.chat({
+        endpoint: this.baseURL,
        error,
        errorType,
        provider: ModelProvider.Azure,
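The `DEBUG_AZURE_CHAT_COMPLETION` change above relies on a pattern worth calling out: `ReadableStream.tee()` splits the response so one branch can be drained for logging while the other is returned to the caller unconsumed. A self-contained sketch — the real `debugStream` lives in `../utils/debugStream`; this stand-in just drains and prints:

```ts
// Drain a stream and log each chunk; failures here must not affect the caller.
const debugStream = async (stream: ReadableStream) => {
  const reader = stream.getReader();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    console.log(value);
  }
};

const withDebug = (stream: ReadableStream): ReadableStream => {
  const [debug, prod] = stream.tee(); // two independent copies of the stream
  if (process.env.DEBUG_AZURE_CHAT_COMPLETION === '1') {
    debugStream(debug).catch(console.error); // fire-and-forget debug branch
  }
  return prod; // the production branch is returned unconsumed
};
```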
package/src/libs/agent-runtime/bedrock/index.test.ts
@@ -1,9 +1,9 @@
 // @vitest-environment node
+import { InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-
-} from '@aws-sdk/client-bedrock-runtime';
+import { AgentRuntimeErrorType, ModelProvider } from '@/libs/agent-runtime';
+
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeBedrockAI } from './index';
 
@@ -12,13 +12,13 @@ const provider = 'bedrock';
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
 
-vi.mock(
+vi.mock('@aws-sdk/client-bedrock-runtime', async (importOriginal) => {
   const module = await importOriginal();
   return {
     ...(module as any),
-    InvokeModelWithResponseStreamCommand: vi.fn()
-  }
-})
+    InvokeModelWithResponseStreamCommand: vi.fn(),
+  };
+});
 
 let instance: LobeBedrockAI;
 
@@ -49,16 +49,44 @@ describe('LobeBedrockAI', () => {
   });
 
   describe('chat', () => {
+    it('should call invokeLlamaModel when model starts with "meta"', async () => {
+      // @ts-ignore
+      const spy = vi.spyOn(instance, 'invokeLlamaModel');
 
-
+      // Act
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'meta.llama:1',
+        temperature: 0,
+      });
+
+      // Assert
+      expect(spy).toHaveBeenCalled();
+    });
 
+    it('should call invokeClaudeModel when model does not start with "meta"', async () => {
+      // @ts-ignore
+      const spy = vi.spyOn(instance, 'invokeClaudeModel');
+
+      // Act
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'anthropic.claude-v2:1',
+        temperature: 0,
+      });
+
+      // Assert
+      expect(spy).toHaveBeenCalled();
+    });
+
+    describe('Claude model', () => {
     it('should return a Response on successful API call', async () => {
       const result = await instance.chat({
         messages: [{ content: 'Hello', role: 'user' }],
         model: 'anthropic.claude-v2:1',
         temperature: 0,
       });
-
+
       // Assert
       expect(result).toBeInstanceOf(Response);
     });
@@ -73,7 +101,7 @@ describe('LobeBedrockAI', () => {
       });
       const mockResponse = Promise.resolve(mockStream);
       (instance['client'].send as Mock).mockResolvedValue(mockResponse);
-
+
       // Act
       const result = await instance.chat({
         messages: [{ content: 'Hello', role: 'user' }],
@@ -81,12 +109,12 @@ describe('LobeBedrockAI', () => {
         temperature: 0,
         top_p: 1,
       });
-
+
       // Assert
       expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
         accept: 'application/json',
         body: JSON.stringify({
-          anthropic_version:
+          anthropic_version: 'bedrock-2023-05-31',
          max_tokens: 4096,
          messages: [{ content: 'Hello', role: 'user' }],
          temperature: 0,
@@ -108,7 +136,7 @@ describe('LobeBedrockAI', () => {
      });
      const mockResponse = Promise.resolve(mockStream);
      (instance['client'].send as Mock).mockResolvedValue(mockResponse);
-
+
      // Act
      const result = await instance.chat({
        messages: [
@@ -119,12 +147,12 @@ describe('LobeBedrockAI', () => {
        temperature: 0,
        top_p: 1,
      });
-
+
      // Assert
      expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
        accept: 'application/json',
        body: JSON.stringify({
-          anthropic_version:
+          anthropic_version: 'bedrock-2023-05-31',
          max_tokens: 4096,
          messages: [{ content: 'Hello', role: 'user' }],
          system: 'You are an awesome greeter',
@@ -147,7 +175,7 @@ describe('LobeBedrockAI', () => {
      });
      const mockResponse = Promise.resolve(mockStream);
      (instance['client'].send as Mock).mockResolvedValue(mockResponse);
-
+
      // Act
      const result = await instance.chat({
        max_tokens: 2048,
@@ -156,12 +184,12 @@ describe('LobeBedrockAI', () => {
        temperature: 0.5,
        top_p: 1,
      });
-
+
      // Assert
      expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
        accept: 'application/json',
        body: JSON.stringify({
-          anthropic_version:
+          anthropic_version: 'bedrock-2023-05-31',
          max_tokens: 2048,
          messages: [{ content: 'Hello', role: 'user' }],
          temperature: 0.5,
@@ -172,7 +200,7 @@ describe('LobeBedrockAI', () => {
      });
      expect(result).toBeInstanceOf(Response);
    });
-
+
    it('should call Anthropic model without unsupported opions', async () => {
      // Arrange
      const mockStream = new ReadableStream({
@@ -183,7 +211,7 @@ describe('LobeBedrockAI', () => {
      });
      const mockResponse = Promise.resolve(mockStream);
      (instance['client'].send as Mock).mockResolvedValue(mockResponse);
-
+
      // Act
      const result = await instance.chat({
        frequency_penalty: 0.5, // Unsupported option
@@ -194,12 +222,12 @@ describe('LobeBedrockAI', () => {
        temperature: 0.5,
        top_p: 1,
      });
-
+
      // Assert
      expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
        accept: 'application/json',
        body: JSON.stringify({
-          anthropic_version:
+          anthropic_version: 'bedrock-2023-05-31',
          max_tokens: 2048,
          messages: [{ content: 'Hello', role: 'user' }],
          temperature: 0.5,
@@ -211,7 +239,156 @@ describe('LobeBedrockAI', () => {
      expect(result).toBeInstanceOf(Response);
    });
 
+      it('should call debugStream when DEBUG_BEDROCK_CHAT_COMPLETION is set to "1"', async () => {
+        // Arrange
+        process.env.DEBUG_BEDROCK_CHAT_COMPLETION = '1';
+        const spy = vi.spyOn(debugStreamModule, 'debugStream');
+
+        // Act
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'anthropic.claude-v2:1',
+          temperature: 0,
+        });
+
+        // Assert
+        expect(spy).toHaveBeenCalled();
+
+        // Clean up
+        delete process.env.DEBUG_BEDROCK_CHAT_COMPLETION;
+      });
+
+      it('should handle errors and throw AgentRuntimeError', async () => {
+        // Arrange
+        const errorMessage = 'An error occurred';
+        const errorMetadata = { statusCode: 500 };
+        const mockError = new Error(errorMessage);
+        (mockError as any).$metadata = errorMetadata;
+        (instance['client'].send as Mock).mockRejectedValue(mockError);
+
+        // Act & Assert
+        await expect(
+          instance.chat({
+            max_tokens: 100,
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'anthropic.claude-v2:1',
+            temperature: 0,
+          }),
+        ).rejects.toThrow(
+          expect.objectContaining({
+            error: {
+              body: errorMetadata,
+              message: errorMessage,
+              type: 'Error',
+            },
+            errorType: AgentRuntimeErrorType.BedrockBizError,
+            provider: ModelProvider.Bedrock,
+            region: 'us-west-2',
+          }),
+        );
+      });
    });
 
+    describe('Llama Model', () => {
+      it('should call Llama model with valid payload', async () => {
+        // Arrange
+        const mockStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Hello, world!');
+            controller.close();
+          },
+        });
+        const mockResponse = Promise.resolve(mockStream);
+        (instance['client'].send as Mock).mockResolvedValue(mockResponse);
+
+        // Act
+        const result = await instance.chat({
+          temperature: 0,
+          max_tokens: 100,
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'meta.llama:1',
+        });
+
+        // Assert
+        expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
+          accept: 'application/json',
+          body: JSON.stringify({
+            max_gen_len: 100,
+            prompt: '<s>[INST] Hello [/INST]',
+          }),
+          contentType: 'application/json',
+          modelId: 'meta.llama:1',
+        });
+        expect(result).toBeInstanceOf(Response);
+      });
+
+      it('should handle errors and throw AgentRuntimeError', async () => {
+        // Arrange
+        const errorMessage = 'An error occurred';
+        const errorMetadata = { statusCode: 500 };
+        const mockError = new Error(errorMessage);
+        (mockError as any).$metadata = errorMetadata;
+        (instance['client'].send as Mock).mockRejectedValue(mockError);
+
+        // Act & Assert
+        await expect(
+          instance.chat({
+            max_tokens: 100,
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'meta.llama:1',
+            temperature: 0,
+          }),
+        ).rejects.toThrow(
+          expect.objectContaining({
+            error: {
+              body: errorMetadata,
+              message: errorMessage,
+              region: 'us-west-2',
+              type: 'Error',
+            },
+            errorType: AgentRuntimeErrorType.BedrockBizError,
+            provider: ModelProvider.Bedrock,
+            region: 'us-west-2',
+          }),
+        );
+      });
+
+      it('should call debugStream when DEBUG_BEDROCK_CHAT_COMPLETION is set to "1"', async () => {
+        // Arrange
+        process.env.DEBUG_BEDROCK_CHAT_COMPLETION = '1';
+        const spy = vi.spyOn(debugStreamModule, 'debugStream');
+
+        // Act
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'meta.llama:1',
+          temperature: 0,
+        });
+
+        // Assert
+        expect(spy).toHaveBeenCalled();
+
+        // Clean up
+        delete process.env.DEBUG_BEDROCK_CHAT_COMPLETION;
+      });
+    });
+
+    it('should call options.callback when provided', async () => {
+      // Arrange
+      const onStart = vi.fn();
+
+      // Act
+      await instance.chat(
+        {
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'anthropic.claude-v2:1',
+          temperature: 0,
+        },
+        { callback: { onStart } },
+      );
+
+      // Assert
+      expect(onStart).toHaveBeenCalled();
+    });
  });
 });
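The first two new tests fix the dispatch rule: Bedrock model ids are vendor-namespaced (`anthropic.claude-v2:1`, `meta.llama:1`), so `chat()` routes on the prefix. Schematically — a simplified stand-in, not the class's real signatures:

```ts
type Payload = { messages: { content: string; role: string }[]; model: string };

class BedrockRoutingSketch {
  chat(payload: Payload): string {
    // Vendor prefix decides the wire format: Meta models take a Llama 2
    // prompt string, everything else goes through Anthropic-style messages.
    return payload.model.startsWith('meta')
      ? this.invokeLlamaModel(payload)
      : this.invokeClaudeModel(payload);
  }

  private invokeClaudeModel(payload: Payload): string {
    return `anthropic-format request for ${payload.model}`;
  }

  private invokeLlamaModel(payload: Payload): string {
    return `llama-prompt request for ${payload.model}`;
  }
}
```

The implementation diff follows.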
package/src/libs/agent-runtime/bedrock/index.ts
@@ -2,23 +2,15 @@ import {
   BedrockRuntimeClient,
   InvokeModelWithResponseStreamCommand,
 } from '@aws-sdk/client-bedrock-runtime';
-import {
-  AWSBedrockLlama2Stream,
-  AWSBedrockStream,
-  StreamingTextResponse
-} from 'ai';
+import { AWSBedrockLlama2Stream, AWSBedrockStream, StreamingTextResponse } from 'ai';
 import { experimental_buildLlama2Prompt } from 'ai/prompts';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
-import {
-
-  ChatStreamPayload,
-  ModelProvider,
-} from '../types';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+import { buildAnthropicMessages } from '../utils/anthropicHelpers';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
-import { buildAnthropicMessages } from '../utils/anthropicHelpers';
 
 export interface LobeBedrockAIParams {
   accessKeyId?: string;
@@ -54,7 +46,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
 
   private invokeClaudeModel = async (
     payload: ChatStreamPayload,
-    options?: ChatCompetitionOptions
+    options?: ChatCompetitionOptions,
   ): Promise<StreamingTextResponse> => {
     const { max_tokens, messages, model, temperature, top_p } = payload;
     const system_message = messages.find((m) => m.role === 'system');
@@ -63,7 +55,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
     const command = new InvokeModelWithResponseStreamCommand({
       accept: 'application/json',
       body: JSON.stringify({
-        anthropic_version:
+        anthropic_version: 'bedrock-2023-05-31',
         max_tokens: max_tokens || 4096,
         messages: buildAnthropicMessages(user_messages),
         system: system_message?.content as string,
@@ -79,7 +71,11 @@ export class LobeBedrockAI implements LobeRuntimeAI {
     const bedrockResponse = await this.client.send(command);
 
     // Convert the response into a friendly text-stream
-    const stream = AWSBedrockStream(
+    const stream = AWSBedrockStream(
+      bedrockResponse,
+      options?.callback,
+      (chunk) => chunk.delta?.text,
+    );
 
     const [debug, output] = stream.tee();
 
@@ -105,9 +101,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
     }
   };
 
-  private invokeLlamaModel = async (
-    payload: ChatStreamPayload
-  ): Promise<StreamingTextResponse> => {
+  private invokeLlamaModel = async (payload: ChatStreamPayload): Promise<StreamingTextResponse> => {
     const { max_tokens, messages, model } = payload;
     const command = new InvokeModelWithResponseStreamCommand({
       accept: 'application/json',
@@ -149,7 +143,6 @@ export class LobeBedrockAI implements LobeRuntimeAI {
       });
     }
   };
-
 }
 
 export default LobeBedrockAI;
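These hunks also thread the caller's streaming callbacks into `AWSBedrockStream`, which is what the new `options.callback` test exercises. A hedged usage sketch — `onStart` is confirmed by the test above, while the constructor argument and import path are assumptions based on this diff's file layout:

```ts
import { LobeBedrockAI } from '@/libs/agent-runtime/bedrock';

// Assumed construction; region matches the value asserted in the tests.
const bedrock = new LobeBedrockAI({ region: 'us-west-2' });

// The second argument's callback hooks fire as the Bedrock stream progresses.
await bedrock.chat(
  {
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'anthropic.claude-v2:1',
    temperature: 0,
  },
  { callback: { onStart: () => console.log('stream opened') } },
);
```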