@lobehub/chat 0.147.1 → 0.147.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/.env.example +47 -56
  2. package/CHANGELOG.md +50 -0
  3. package/Dockerfile +3 -2
  4. package/README.md +2 -1
  5. package/README.zh-CN.md +7 -6
  6. package/docs/self-hosting/advanced/authentication.mdx +6 -8
  7. package/docs/self-hosting/advanced/authentication.zh-CN.mdx +6 -8
  8. package/docs/self-hosting/environment-variables/analytics.mdx +1 -1
  9. package/docs/self-hosting/environment-variables/analytics.zh-CN.mdx +1 -3
  10. package/docs/self-hosting/environment-variables/model-provider.mdx +19 -21
  11. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +16 -2
  12. package/docs/self-hosting/environment-variables.mdx +2 -1
  13. package/docs/self-hosting/environment-variables.zh-CN.mdx +2 -1
  14. package/docs/self-hosting/examples/azure-openai.mdx +6 -26
  15. package/docs/self-hosting/examples/azure-openai.zh-CN.mdx +6 -23
  16. package/docs/self-hosting/examples/ollama.mdx +10 -1
  17. package/docs/self-hosting/examples/ollama.zh-CN.mdx +10 -1
  18. package/docs/self-hosting/platform/docker.mdx +3 -3
  19. package/docs/self-hosting/platform/docker.zh-CN.mdx +3 -3
  20. package/docs/usage/features/multi-ai-providers.mdx +6 -0
  21. package/docs/usage/features/multi-ai-providers.zh-CN.mdx +6 -1
  22. package/locales/ar/modelProvider.json +5 -0
  23. package/locales/bg-BG/modelProvider.json +5 -0
  24. package/locales/de-DE/modelProvider.json +5 -0
  25. package/locales/en-US/modelProvider.json +5 -0
  26. package/locales/es-ES/modelProvider.json +5 -0
  27. package/locales/fr-FR/modelProvider.json +5 -0
  28. package/locales/it-IT/modelProvider.json +5 -0
  29. package/locales/ja-JP/modelProvider.json +5 -0
  30. package/locales/ko-KR/modelProvider.json +5 -0
  31. package/locales/nl-NL/modelProvider.json +5 -0
  32. package/locales/pl-PL/modelProvider.json +5 -0
  33. package/locales/pt-BR/modelProvider.json +5 -0
  34. package/locales/ru-RU/modelProvider.json +5 -0
  35. package/locales/tr-TR/modelProvider.json +5 -0
  36. package/locales/vi-VN/modelProvider.json +5 -0
  37. package/locales/zh-CN/modelProvider.json +5 -0
  38. package/locales/zh-TW/modelProvider.json +5 -0
  39. package/package.json +2 -2
  40. package/src/app/api/chat/agentRuntime.ts +3 -2
  41. package/src/app/settings/llm/Azure/index.tsx +1 -1
  42. package/src/app/settings/llm/Google/index.tsx +1 -0
  43. package/src/app/settings/llm/components/ProviderConfig/index.tsx +1 -1
  44. package/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx +1 -1
  45. package/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx +1 -1
  46. package/src/app/settings/llm/components/ProviderModelList/ModelFetcher.tsx +3 -3
  47. package/src/app/settings/llm/components/ProviderModelList/index.tsx +2 -2
  48. package/src/config/server/provider.ts +2 -0
  49. package/src/features/AgentSetting/AgentConfig/ModelSelect.tsx +1 -1
  50. package/src/features/Conversation/Error/APIKeyForm/ProviderApiKeyForm.tsx +2 -2
  51. package/src/features/ModelSwitchPanel/index.tsx +1 -1
  52. package/src/libs/agent-runtime/google/index.ts +4 -2
  53. package/src/locales/default/modelProvider.ts +5 -0
  54. package/src/services/_auth.ts +5 -3
  55. package/src/services/_header.ts +3 -4
  56. package/src/services/chat.ts +1 -1
  57. package/src/services/ollama.ts +3 -2
  58. package/src/store/global/slices/settings/actions/llm.test.ts +1 -1
  59. package/src/store/global/slices/settings/actions/llm.ts +2 -2
  60. package/src/store/global/slices/settings/selectors/modelConfig.test.ts +35 -14
  61. package/src/store/global/slices/settings/selectors/modelConfig.ts +55 -118
@@ -30,4 +30,13 @@ docker run -d -p 3210:3210 -e OLLAMA_PROXY_URL=http://host.docker.internal:11434
30
30
 
31
31
  接下来,你就可以使用 LobeChat 与本地 LLM 对话了。
32
32
 
33
- 关于在 LobeChat 中使用 Ollama 的更多信息,请查阅 [Ollama 使用](/zh/usage/providers/ollama)。
33
+ 关于在 LobeChat 中使用 Ollama 的更多信息,请查阅 [Ollama 使用](/zh/docs/usage/providers/ollama)。
34
+
35
+ ## 非本地访问 Ollama
36
+
37
+ 由于 Ollama 默认参数在启动时仅设置了本地访问,所以跨域访问以及端口监听需要进行额外的环境变量设置
38
+ ```
39
+ set OLLAMA_ORIGINS=*
40
+ set OLLAMA_HOST=0.0.0.0:11434
41
+ ```
42
+ 详细配置方法可以参考 [Ollama 官方文档](https://ollama.com/docs/configuration)。
@@ -34,7 +34,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat
34
34
  ```fish
35
35
  $ apt install docker.io
36
36
  ```
37
-
37
+
38
38
  </Tab>
39
39
 
40
40
  <Tab>
@@ -43,7 +43,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat
43
43
  ```
44
44
 
45
45
  </Tab>
46
-
46
+
47
47
  </Tabs>
48
48
 
49
49
  ### Docker Command Deployment
@@ -106,7 +106,7 @@ First, create a `lobe.env` configuration file with various environment variables
106
106
  OPENAI_API_KEY=sk-xxxx
107
107
  OPENAI_PROXY_URL=https://api-proxy.com/v1
108
108
  ACCESS_CODE=arthals2333
109
- CUSTOM_MODELS=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
109
+ OPENAI_MODEL_LIST=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
110
110
  ```
111
111
 
112
112
  Then, you can use the following script to automate the update:
@@ -33,7 +33,7 @@ tags:
33
33
  ```fish
34
34
  $ apt install docker.io
35
35
  ```
36
-
36
+
37
37
  </Tab>
38
38
 
39
39
  <Tab>
@@ -42,7 +42,7 @@ tags:
42
42
  ```
43
43
 
44
44
  </Tab>
45
-
45
+
46
46
  </Tabs>
47
47
 
48
48
  ### Docker 指令部署
@@ -104,7 +104,7 @@ $ docker run -d -p 3210:3210 \
104
104
  OPENAI_API_KEY=sk-xxxx
105
105
  OPENAI_PROXY_URL=https://api-proxy.com/v1
106
106
  ACCESS_CODE=arthals2333
107
- CUSTOM_MODELS=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
107
+ OPENAI_MODEL_LIST=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
108
108
  ```
109
109
 
110
110
  然后,你可以使用以下脚本来自动更新:
@@ -11,6 +11,7 @@ tags:
11
11
  - Google AI Gemini
12
12
  - ChatGLM
13
13
  - Moonshot AI
14
+ - Together AI
14
15
  - local model support
15
16
  - Ollama
16
17
  ---
@@ -35,9 +36,14 @@ In this way, LobeChat can more flexibly adapt to the needs of different users, w
35
36
  We have implemented support for the following model service providers:
36
37
 
37
38
  - **AWS Bedrock**: Integrated with AWS Bedrock service, supporting models such as **Claude / LLama2**, providing powerful natural language processing capabilities. [Learn more](https://aws.amazon.com/cn/bedrock)
39
+ - **Anthropic (Claude)**: Accessed Anthropic's **Claude** series models, including Claude 3 and Claude 2, with breakthroughs in multi-modal capabilities and extended context, setting a new industry benchmark. [Learn more](https://www.anthropic.com/claude)
38
40
  - **Google AI (Gemini Pro, Gemini Vision)**: Access to Google's **Gemini** series models, including Gemini and Gemini Pro, to support advanced language understanding and generation. [Learn more](https://deepmind.google/technologies/gemini/)
39
41
  - **ChatGLM**: Added the **ChatGLM** series models from Zhipuai (GLM-4/GLM-4-vision/GLM-3-turbo), providing users with another efficient conversation model choice. [Learn more](https://www.zhipuai.cn/)
40
42
  - **Moonshot AI (Dark Side of the Moon)**: Integrated with the Moonshot series models, an innovative AI startup from China, aiming to provide deeper conversation understanding. [Learn more](https://www.moonshot.cn/)
43
+ - **Groq**: Accessed Groq's AI models, efficiently processing message sequences and generating responses, capable of multi-turn dialogues and single-interaction tasks. [Learn more](https://groq.com/)
44
+ - **OpenRouter**: Supports routing of models including **Claude 3**, **Gemma**, **Mistral**, **Llama2** and **Cohere**, with intelligent routing optimization to improve usage efficiency, open and flexible. [Learn more](https://openrouter.ai/)
45
+ - **01.AI (Yi Model)**: Integrated the 01.AI models, with series of APIs featuring fast inference speed, which not only shortened the processing time, but also maintained excellent model performance. [Learn more](https://01.ai/)
46
+ - **Together.ai**: Over 100 leading open-source Chat, Language, Image, Code, and Embedding models are available through the Together Inference API. For these models you pay just for what you use. [Learn more](https://www.together.ai/)
41
47
 
42
48
  At the same time, we are also planning to support more model service providers, such as Replicate and Perplexity, to further enrich our service provider library. If you would like LobeChat to support your favorite service provider, feel free to join our [community discussion](https://github.com/lobehub/lobe-chat/discussions/1284).
43
49
 
@@ -12,6 +12,7 @@ tags:
12
12
  - ChatGLM
13
13
  - Moonshot AI
14
14
  - 01 AI
15
+ - Together AI
15
16
  - Ollama
16
17
  ---
17
18
 
@@ -36,9 +37,13 @@ tags:
36
37
 
37
38
  - **AWS Bedrock**:集成了 AWS Bedrock 服务,支持了 **Claude / LLama2** 等模型,提供了强大的自然语言处理能力。[了解更多](https://aws.amazon.com/cn/bedrock)
38
39
  - **Google AI (Gemini Pro、Gemini Vision)**:接入了 Google 的 **Gemini** 系列模型,包括 Gemini 和 Gemini Pro,以支持更高级的语言理解和生成。[了解更多](https://deepmind.google/technologies/gemini/)
40
+ - **Anthropic (Claude)**:接入了 Anthropic 的 **Claude** 系列模型,包括 Claude 3 和 Claude 2,多模态突破,超长上下文,树立行业新基准。[了解更多](https://www.anthropic.com/claude)
39
41
  - **ChatGLM**:加入了智谱的 **ChatGLM** 系列模型(GLM-4/GLM-4-vision/GLM-3-turbo),为用户提供了另一种高效的会话模型选择。[了解更多](https://www.zhipuai.cn/)
40
42
  - **Moonshot AI (月之暗面)**:集成了 Moonshot 系列模型,这是一家来自中国的创新性 AI 创业公司,旨在提供更深层次的会话理解。[了解更多](https://www.moonshot.cn/)
41
- - **01 AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
43
+ - **Together.ai**:集成部署了数百种开源模型和向量模型,无需本地部署即可随时访问这些模型。[了解更多](https://www.together.ai/)
44
+ - **01.AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
45
+ - **Groq**:接入了 Groq 的 AI 模型,高效处理消息序列,生成回应,胜任多轮对话及单次交互任务。[了解更多](https://groq.com/)
46
+ - **OpenRouter**:其支持包括 **Claude 3**,**Gemma**,**Mistral**,**Llama2**和**Cohere**等模型路由,支持智能路由优化,提升使用效率,开放且灵活。[了解更多](https://openrouter.ai/)
42
47
 
43
48
  同时,我们也在计划支持更多的模型服务商,如 Replicate 和 Perplexity 等,以进一步丰富我们的服务商库。如果你希望让 LobeChat 支持你喜爱的服务商,欢迎加入我们的[社区讨论](https://github.com/lobehub/lobe-chat/discussions/1284)。
44
49
 
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "بالإضافة إلى العنوان الافتراضي، يجب أن يتضمن http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "عنوان وكيل API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "أدخل مفتاح API الخاص بـ Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Изисква се адрес, включително http(s)://, освен ако не е по подразбиране",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Адрес на API прокси"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Въведете API Key, получен от Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Geben Sie Ihren API-Key von Google ein",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Must include http(s):// besides the default address",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API Proxy Address"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Enter the API Key from Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Aparte de la dirección predeterminada, debe incluir http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Dirección del proxy de la API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Introduce la clave API proporcionada por Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Incluez http(s):// en plus de l'adresse par défaut",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Adresse du proxy API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Saisissez la clé API de Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Indirizzo dell'API Proxy"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Inserisci la chiave API da Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "デフォルトのアドレスに加えて、http(s)://を含める必要があります",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "APIプロキシアドレス"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google の API Key を入力してください",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "기본 주소 이외에 http(s)://를 포함해야 합니다.",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 프록시 주소"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google에서 제공하는 API 키를 입력하세요.",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Voer de API Key van Google in",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Wprowadź klucz API uzyskany od Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Além do endereço padrão, deve incluir http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Endereço do Proxy da API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Insira sua API Key fornecida pelo Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Помимо адреса по умолчанию, должен включать http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Адрес прокси-API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Введите свой API Key от Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google'dan gelen API Key'i girin",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Ngoài địa chỉ mặc định, phải bao gồm http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Địa chỉ Proxy API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Nhập API Key từ Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "填入来自 Google 的 API Key",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除了預設地址外,必須包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "填入來自 Google 的 API 金鑰",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "0.147.1",
3
+ "version": "0.147.3",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -86,7 +86,7 @@
86
86
  "@aws-sdk/client-bedrock-runtime": "^3.549.0",
87
87
  "@azure/openai": "^1.0.0-beta.12",
88
88
  "@cfworker/json-schema": "^1.12.8",
89
- "@google/generative-ai": "^0.3.1",
89
+ "@google/generative-ai": "^0.5.0",
90
90
  "@icons-pack/react-simple-icons": "^9.4.0",
91
91
  "@lobehub/chat-plugin-sdk": "latest",
92
92
  "@lobehub/chat-plugins-gateway": "latest",
@@ -224,10 +224,11 @@ class AgentRuntime {
224
224
  }
225
225
 
226
226
  private static initGoogle(payload: JWTPayload) {
227
- const { GOOGLE_API_KEY } = getServerConfig();
227
+ const { GOOGLE_API_KEY, GOOGLE_PROXY_URL } = getServerConfig();
228
228
  const apiKey = apiKeyManager.pick(payload?.apiKey || GOOGLE_API_KEY);
229
+ const baseURL = payload?.endpoint || GOOGLE_PROXY_URL;
229
230
 
230
- return new LobeGoogleAI({ apiKey });
231
+ return new LobeGoogleAI({ apiKey, baseURL });
231
232
  }
232
233
 
233
234
  private static initBedrock(payload: JWTPayload) {
@@ -34,7 +34,7 @@ const AzureOpenAIProvider = memo(() => {
34
34
 
35
35
  // Get the first model card's deployment name as the check model
36
36
  const checkModel = useGlobalStore((s) => {
37
- const chatModelCards = modelConfigSelectors.providerModelCards(providerKey)(s);
37
+ const chatModelCards = modelConfigSelectors.getModelCardsByProviderId(providerKey)(s);
38
38
 
39
39
  if (chatModelCards.length > 0) {
40
40
  return chatModelCards[0].deploymentName;
@@ -12,6 +12,7 @@ const GoogleProvider = memo(() => {
12
12
  <ProviderConfig
13
13
  checkModel={'gemini-pro'}
14
14
  provider={ModelProvider.Google}
15
+ showEndpoint
15
16
  title={
16
17
  <Flexbox align={'center'} gap={8} horizontal>
17
18
  <Google.BrandColor size={28} />
@@ -54,7 +54,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
54
54
  const [toggleProviderEnabled, setSettings, enabled] = useGlobalStore((s) => [
55
55
  s.toggleProviderEnabled,
56
56
  s.setSettings,
57
- modelConfigSelectors.providerEnabled(provider)(s),
57
+ modelConfigSelectors.isProviderEnabled(provider)(s),
58
58
  ]);
59
59
 
60
60
  useSyncSettings(form);
@@ -29,7 +29,7 @@ const CustomModelOption = memo<CustomModelOptionProps>(({ id, provider }) => {
29
29
  s.removeEnabledModels,
30
30
  ]);
31
31
  const modelCard = useGlobalStore(
32
- modelConfigSelectors.getCustomModelCardById({ id, provider }),
32
+ modelConfigSelectors.getCustomModelCard({ id, provider }),
33
33
  isEqual,
34
34
  );
35
35
 
@@ -27,7 +27,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ showAzureDeployName, pro
27
27
  ]);
28
28
 
29
29
  const modelCard = useGlobalStore(
30
- modelConfigSelectors.getCustomModelCardById({ id, provider: editingProvider }),
30
+ modelConfigSelectors.getCustomModelCard({ id, provider: editingProvider }),
31
31
  isEqual,
32
32
  );
33
33
 
@@ -36,12 +36,12 @@ const ModelFetcher = memo<ModelFetcherProps>(({ provider }) => {
36
36
  s.useFetchProviderModelList,
37
37
  s.setModelProviderConfig,
38
38
  ]);
39
- const enabledAutoFetch = useGlobalStore(modelConfigSelectors.enabledAutoFetchModels(provider));
39
+ const enabledAutoFetch = useGlobalStore(modelConfigSelectors.isAutoFetchModelsEnabled(provider));
40
40
  const latestFetchTime = useGlobalStore(
41
- (s) => modelConfigSelectors.providerConfig(provider)(s)?.latestFetchTime,
41
+ (s) => modelConfigSelectors.getConfigByProviderId(provider)(s)?.latestFetchTime,
42
42
  );
43
43
  const totalModels = useGlobalStore(
44
- (s) => modelConfigSelectors.providerModelCards(provider)(s).length,
44
+ (s) => modelConfigSelectors.getModelCardsByProviderId(provider)(s).length,
45
45
  );
46
46
 
47
47
  const { mutate, isValidating } = useFetchProviderModelList(provider, enabledAutoFetch);
@@ -51,7 +51,7 @@ const ProviderModelListSelect = memo<CustomModelSelectProps>(
51
51
  ]);
52
52
 
53
53
  const chatModelCards = useGlobalStore(
54
- modelConfigSelectors.providerModelCards(provider),
54
+ modelConfigSelectors.getModelCardsByProviderId(provider),
55
55
  isEqual,
56
56
  );
57
57
 
@@ -60,7 +60,7 @@ const ProviderModelListSelect = memo<CustomModelSelectProps>(
60
60
  isEqual,
61
61
  );
62
62
  const enabledModels = useGlobalStore(
63
- modelConfigSelectors.providerEnableModels(provider),
63
+ modelConfigSelectors.getEnableModelsByProviderId(provider),
64
64
  isEqual,
65
65
  );
66
66
 
@@ -28,6 +28,7 @@ declare global {
28
28
  // Google Provider
29
29
  ENABLED_GOOGLE?: string;
30
30
  GOOGLE_API_KEY?: string;
31
+ GOOGLE_PROXY_URL?: string;
31
32
 
32
33
  // Moonshot Provider
33
34
  ENABLED_MOONSHOT?: string;
@@ -154,6 +155,7 @@ export const getProviderConfig = () => {
154
155
 
155
156
  ENABLED_GOOGLE: !!GOOGLE_API_KEY,
156
157
  GOOGLE_API_KEY,
158
+ GOOGLE_PROXY_URL: process.env.GOOGLE_PROXY_URL,
157
159
 
158
160
  ENABLED_PERPLEXITY: !!PERPLEXITY_API_KEY,
159
161
  PERPLEXITY_API_KEY,
@@ -25,7 +25,7 @@ interface ModelOption {
25
25
 
26
26
  const ModelSelect = memo(() => {
27
27
  const [model, updateConfig] = useStore((s) => [s.config.model, s.setAgentConfig]);
28
- const enabledList = useGlobalStore(modelConfigSelectors.enabledModelProviderList, isEqual);
28
+ const enabledList = useGlobalStore(modelConfigSelectors.providerListForModelSelect, isEqual);
29
29
  const { styles } = useStyles();
30
30
 
31
31
  const options = useMemo<SelectProps['options']>(() => {
@@ -24,8 +24,8 @@ const ProviderApiKeyForm = memo<ProviderApiKeyFormProps>(
24
24
  const [showProxy, setShow] = useState(false);
25
25
 
26
26
  const [apiKey, proxyUrl, setConfig] = useGlobalStore((s) => [
27
- modelConfigSelectors.providerConfig(provider)(s)?.apiKey,
28
- modelConfigSelectors.providerConfig(provider)(s)?.endpoint,
27
+ modelConfigSelectors.getConfigByProviderId(provider)(s)?.apiKey,
28
+ modelConfigSelectors.getConfigByProviderId(provider)(s)?.endpoint,
29
29
  s.setModelProviderConfig,
30
30
  ]);
31
31
 
@@ -44,7 +44,7 @@ const ModelSwitchPanel = memo<PropsWithChildren>(({ children }) => {
44
44
  const updateAgentConfig = useSessionStore((s) => s.updateAgentConfig);
45
45
 
46
46
  const router = useRouter();
47
- const enabledList = useGlobalStore(modelConfigSelectors.enabledModelProviderList, isEqual);
47
+ const enabledList = useGlobalStore(modelConfigSelectors.providerListForModelSelect, isEqual);
48
48
 
49
49
  const items = useMemo(() => {
50
50
  const getModelItems = (provider: ModelProviderCard) => {
@@ -27,11 +27,13 @@ enum HarmBlockThreshold {
27
27
 
28
28
  export class LobeGoogleAI implements LobeRuntimeAI {
29
29
  private client: GoogleGenerativeAI;
30
+ baseURL?: string;
30
31
 
31
- constructor({ apiKey }: { apiKey?: string }) {
32
+ constructor({ apiKey, baseURL }: { apiKey?: string; baseURL?: string }) {
32
33
  if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidGoogleAPIKey);
33
34
 
34
35
  this.client = new GoogleGenerativeAI(apiKey);
36
+ this.baseURL = baseURL;
35
37
  }
36
38
 
37
39
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
@@ -70,7 +72,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
70
72
  },
71
73
  ],
72
74
  },
73
- { apiVersion: 'v1beta' },
75
+ { apiVersion: 'v1beta', baseUrl: this.baseURL },
74
76
  )
75
77
  .generateContentStream({ contents });
76
78
 
@@ -69,6 +69,11 @@ export default {
69
69
  },
70
70
  },
71
71
  google: {
72
+ endpoint: {
73
+ desc: '除默认地址外,必须包含 http(s)://',
74
+ placeholder: 'https://generativelanguage.googleapis.com',
75
+ title: 'API 代理地址',
76
+ },
72
77
  title: 'Google',
73
78
  token: {
74
79
  desc: '填入来自 Google 的 API Key',
@@ -29,13 +29,15 @@ export const getProviderAuthPayload = (provider: string) => {
29
29
  }
30
30
 
31
31
  case ModelProvider.Ollama: {
32
- const endpoint = modelConfigSelectors.ollamaProxyUrl(useGlobalStore.getState());
32
+ const config = modelConfigSelectors.ollamaConfig(useGlobalStore.getState());
33
33
 
34
- return { endpoint };
34
+ return { endpoint: config?.endpoint };
35
35
  }
36
36
 
37
37
  default: {
38
- const config = modelConfigSelectors.providerConfig(provider)(useGlobalStore.getState());
38
+ const config = modelConfigSelectors.getConfigByProviderId(provider)(
39
+ useGlobalStore.getState(),
40
+ );
39
41
 
40
42
  return { apiKey: config?.apiKey, endpoint: config?.endpoint };
41
43
  }
@@ -8,14 +8,13 @@ import { modelConfigSelectors, settingsSelectors } from '@/store/global/selector
8
8
  */
9
9
  // eslint-disable-next-line no-undef
10
10
  export const createHeaderWithOpenAI = (header?: HeadersInit): HeadersInit => {
11
- const apiKey = modelConfigSelectors.openAIAPIKey(useGlobalStore.getState()) || '';
12
- const endpoint = modelConfigSelectors.openAIProxyUrl(useGlobalStore.getState()) || '';
11
+ const openAIConfig = modelConfigSelectors.openAIConfig(useGlobalStore.getState());
13
12
 
14
13
  // eslint-disable-next-line no-undef
15
14
  return {
16
15
  ...header,
17
16
  [LOBE_CHAT_ACCESS_CODE]: settingsSelectors.password(useGlobalStore.getState()),
18
- [OPENAI_API_KEY_HEADER_KEY]: apiKey,
19
- [OPENAI_END_POINT]: endpoint,
17
+ [OPENAI_API_KEY_HEADER_KEY]: openAIConfig.apiKey || '',
18
+ [OPENAI_END_POINT]: openAIConfig.endpoint || '',
20
19
  };
21
20
  };
@@ -137,7 +137,7 @@ class ChatService {
137
137
 
138
138
  // if the provider is Azure, get the deployment name as the request model
139
139
  if (provider === ModelProvider.Azure) {
140
- const chatModelCards = modelConfigSelectors.providerModelCards(provider)(
140
+ const chatModelCards = modelConfigSelectors.getModelCardsByProviderId(provider)(
141
141
  useGlobalStore.getState(),
142
142
  );
143
143
 
@@ -11,8 +11,9 @@ const DEFAULT_BASE_URL = 'http://127.0.0.1:11434/v1';
11
11
 
12
12
  class OllamaService {
13
13
  getHost = (): string => {
14
- const endpoint = modelConfigSelectors.ollamaProxyUrl(useGlobalStore.getState());
15
- const url = new URL(endpoint || DEFAULT_BASE_URL);
14
+ const config = modelConfigSelectors.ollamaConfig(useGlobalStore.getState());
15
+
16
+ const url = new URL(config.endpoint || DEFAULT_BASE_URL);
16
17
  return url.host;
17
18
  };
18
19
 
@@ -46,7 +46,7 @@ describe('LLMSettingsSliceAction', () => {
46
46
  const payload: CustomModelCardDispatch = { type: 'add', modelCard: { id: 'test-id' } };
47
47
 
48
48
  // Mock the selector to return undefined
49
- vi.spyOn(modelConfigSelectors, 'providerConfig').mockReturnValue(() => undefined);
49
+ vi.spyOn(modelConfigSelectors, 'getConfigByProviderId').mockReturnValue(() => undefined);
50
50
  vi.spyOn(result.current, 'setModelProviderConfig');
51
51
 
52
52
  await act(async () => {
@@ -37,7 +37,7 @@ export const llmSettingsSlice: StateCreator<
37
37
  LLMSettingsAction
38
38
  > = (set, get) => ({
39
39
  dispatchCustomModelCards: async (provider, payload) => {
40
- const prevState = modelConfigSelectors.providerConfig(provider)(get());
40
+ const prevState = modelConfigSelectors.getConfigByProviderId(provider)(get());
41
41
 
42
42
  if (!prevState) return;
43
43
 
@@ -47,7 +47,7 @@ export const llmSettingsSlice: StateCreator<
47
47
  },
48
48
 
49
49
  removeEnabledModels: async (provider, model) => {
50
- const config = modelConfigSelectors.providerConfig(provider)(get());
50
+ const config = modelConfigSelectors.getConfigByProviderId(provider)(get());
51
51
 
52
52
  await get().setModelProviderConfig(provider, {
53
53
  enabledModels: config?.enabledModels?.filter((s) => s !== model).filter(Boolean),