@lobehub/chat 0.147.2 → 0.147.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. package/.env.example +47 -56
  2. package/CHANGELOG.md +25 -0
  3. package/Dockerfile +3 -2
  4. package/README.md +2 -1
  5. package/README.zh-CN.md +7 -6
  6. package/docs/self-hosting/advanced/authentication.mdx +6 -8
  7. package/docs/self-hosting/advanced/authentication.zh-CN.mdx +6 -8
  8. package/docs/self-hosting/environment-variables/analytics.mdx +1 -1
  9. package/docs/self-hosting/environment-variables/analytics.zh-CN.mdx +1 -3
  10. package/docs/self-hosting/environment-variables/model-provider.mdx +19 -21
  11. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +16 -2
  12. package/docs/self-hosting/environment-variables.mdx +2 -1
  13. package/docs/self-hosting/environment-variables.zh-CN.mdx +2 -1
  14. package/docs/self-hosting/examples/azure-openai.mdx +6 -26
  15. package/docs/self-hosting/examples/azure-openai.zh-CN.mdx +6 -23
  16. package/docs/self-hosting/examples/ollama.mdx +10 -1
  17. package/docs/self-hosting/examples/ollama.zh-CN.mdx +10 -1
  18. package/docs/self-hosting/platform/docker.mdx +3 -3
  19. package/docs/self-hosting/platform/docker.zh-CN.mdx +3 -3
  20. package/docs/usage/features/multi-ai-providers.mdx +6 -0
  21. package/docs/usage/features/multi-ai-providers.zh-CN.mdx +6 -1
  22. package/locales/ar/modelProvider.json +5 -0
  23. package/locales/bg-BG/modelProvider.json +5 -0
  24. package/locales/de-DE/modelProvider.json +5 -0
  25. package/locales/en-US/modelProvider.json +5 -0
  26. package/locales/es-ES/modelProvider.json +5 -0
  27. package/locales/fr-FR/modelProvider.json +5 -0
  28. package/locales/it-IT/modelProvider.json +5 -0
  29. package/locales/ja-JP/modelProvider.json +5 -0
  30. package/locales/ko-KR/modelProvider.json +5 -0
  31. package/locales/nl-NL/modelProvider.json +5 -0
  32. package/locales/pl-PL/modelProvider.json +5 -0
  33. package/locales/pt-BR/modelProvider.json +5 -0
  34. package/locales/ru-RU/modelProvider.json +5 -0
  35. package/locales/tr-TR/modelProvider.json +5 -0
  36. package/locales/vi-VN/modelProvider.json +5 -0
  37. package/locales/zh-CN/modelProvider.json +5 -0
  38. package/locales/zh-TW/modelProvider.json +5 -0
  39. package/package.json +2 -2
  40. package/src/app/api/chat/agentRuntime.ts +3 -2
  41. package/src/app/settings/llm/Google/index.tsx +1 -0
  42. package/src/config/server/provider.ts +2 -0
  43. package/src/libs/agent-runtime/google/index.ts +4 -2
  44. package/src/locales/default/modelProvider.ts +5 -0
package/.env.example CHANGED
@@ -1,14 +1,16 @@
1
1
  # add an access code to lock your lobe-chat application, you can set a long password to avoid leaking. If this value contains a comma, it is a password array.
2
- #ACCESS_CODE=lobe66
2
+ # ACCESS_CODE=lobe66
3
3
 
4
4
  # Specify your API Key selection method, currently supporting `random` and `turn`.
5
5
  # API_KEY_SELECT_MODE=random
6
6
 
7
7
 
8
8
  ########################################
9
- ############ OpenAI Service ############
9
+ ######## Model Provider Service ########
10
10
  ########################################
11
11
 
12
+ ### OpenAI ###
13
+
12
14
  # your openai api key
13
15
  OPENAI_API_KEY=sk-xxxxxxxxx
14
16
 
@@ -16,99 +18,88 @@ OPENAI_API_KEY=sk-xxxxxxxxx
16
18
  # OPENAI_PROXY_URL=https://api.openai.com/v1
17
19
 
18
20
  # add your custom model names, multiple models separated by comma. for example gpt-3.5-1106,gpt-4-1106
19
- #OPENAI_ENABLED_MODELS=gpt-3.5-turbo
21
+ # OPENAI_MODEL_LIST=gpt-3.5-turbo
20
22
 
21
- ########################################
22
- ######### Azure OpenAI Service #########
23
- ########################################
24
- # you can learn azure OpenAI Service on https://learn.microsoft.com/en-us/azure/ai-services/openai/overview
25
23
 
24
+ ### Azure OpenAI ###
25
+
26
+ # you can learn azure OpenAI Service on https://learn.microsoft.com/en-us/azure/ai-services/openai/overview
26
27
  # use Azure OpenAI Service by uncomment the following line
27
28
 
28
29
  # The API key you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section.
29
30
  # AZURE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
30
31
 
31
32
  # The endpoint you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section.
32
- # OPENAI_PROXY_URL=https://docs-test-001.openai.azure.com
33
+ # AZURE_ENDPOINT=https://docs-test-001.openai.azure.com
33
34
 
34
35
  # Azure's API version, follows the YYYY-MM-DD format
35
- # AZURE_API_VERSION=2023-08-01-preview
36
+ # AZURE_API_VERSION=2024-02-01
36
37
 
37
- ########################################
38
- ############ ZhiPu AI Service ##########
39
- ########################################
40
38
 
41
- #ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx
39
+ ### Anthropic Service ####
42
40
 
43
- ########################################
44
- ########## Moonshot AI Service #########
45
- ########################################
41
+ # ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
46
42
 
47
- #MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
43
+ # use a proxy to connect to the Anthropic API
44
+ # ANTHROPIC_PROXY_URL=https://api.anthropic.com
48
45
 
49
- ########################################
50
- ########### Google AI Service ##########
51
- ########################################
52
46
 
53
- #GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
47
+ ### Google AI ####
54
48
 
55
- ########################################
56
- ######### AWS Bedrock Service ##########
57
- ########################################
49
+ # GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
58
50
 
59
- #AWS_REGION=us-east-1
60
- #AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
61
- #AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
62
51
 
63
- ########################################
64
- ######### Ollama AI Service ##########
65
- ########################################
52
+ ### AWS Bedrock ###
53
+
54
+ # AWS_REGION=us-east-1
55
+ # AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
56
+ # AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
57
+
58
+
59
+ ### Ollama AI ####
66
60
 
67
61
  # You can use ollama to get and run LLM locally, learn more about it via https://github.com/ollama/ollama
62
+
68
63
  # The local/remote ollama service url
69
64
  # OLLAMA_PROXY_URL=http://127.0.0.1:11434/v1
70
65
 
71
- ########### Mistral AI Service ##########
72
- ########################################
66
+ # OLLAMA_MODEL_LIST=your_ollama_model_names
67
+
68
+
69
+ ### OpenRouter Service ###
70
+
71
+ # OPENROUTER_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
72
+ # OPENROUTER_MODEL_LIST=model1,model2,model3
73
+
74
+
75
+ ### Mistral AI ###
73
76
 
74
77
  # MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
75
78
 
76
- ########################################
77
- ######### Perplexity Service ###########
78
- ########################################
79
+ ### Perplexity Service ###
79
80
 
80
81
  # PERPLEXITY_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
81
82
 
82
- ########################################
83
- ######### Anthropic Service ############
84
- ########################################
83
+ ### Groq Service ####
85
84
 
86
- # ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
85
+ # GROQ_API_KEY=gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
87
86
 
88
- ########################################
89
- ############ Groq Service ##############
90
- ########################################
87
+ #### 01.AI Service ####
91
88
 
92
- # GROQ_API_KEY=gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
89
+ # ZEROONE_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
93
90
 
94
- ########################################
95
- ######### OpenRouter Service ##########
96
- ########################################
91
+ ### TogetherAI Service ###
97
92
 
98
- #OPENROUTER_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
99
- #OPENROUTER_CUSTOM_MODELS=model1,model2,model3
93
+ # TOGETHERAI_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
100
94
 
101
- ########################################
102
- ######### 01.AI Service ##########
103
- ########################################
95
+ ### ZhiPu AI ###
104
96
 
105
- #ZEROONE_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
97
+ # ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx
106
98
 
107
- ########################################
108
- ######### TogetherAI Service ##########
109
- ########################################
99
+ ### Moonshot AI ####
100
+
101
+ # MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
110
102
 
111
- #TOGETHERAI_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
112
103
 
113
104
  ########################################
114
105
  ############ Market Service ############
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 0.147.3](https://github.com/lobehub/lobe-chat/compare/v0.147.2...v0.147.3)
6
+
7
+ <sup>Released on **2024-04-11**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Support Google Proxy URL.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Support Google Proxy URL, closes [#1979](https://github.com/lobehub/lobe-chat/issues/1979) ([fbf2c24](https://github.com/lobehub/lobe-chat/commit/fbf2c24))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 0.147.2](https://github.com/lobehub/lobe-chat/compare/v0.147.1...v0.147.2)
6
31
 
7
32
  <sup>Released on **2024-04-11**</sup>
package/Dockerfile CHANGED
@@ -58,13 +58,13 @@ ENV PORT=3210
58
58
 
59
59
  # General Variables
60
60
  ENV ACCESS_CODE ""
61
- ENV CUSTOM_MODELS ""
62
61
 
63
62
  ENV API_KEY_SELECT_MODE ""
64
63
 
65
64
  # OpenAI
66
65
  ENV OPENAI_API_KEY ""
67
66
  ENV OPENAI_PROXY_URL ""
67
+ ENV OPENAI_MODEL_LIST ""
68
68
 
69
69
  # Azure OpenAI
70
70
  ENV USE_AZURE_OPENAI ""
@@ -82,6 +82,7 @@ ENV MOONSHOT_API_KEY ""
82
82
 
83
83
  # Ollama
84
84
  ENV OLLAMA_PROXY_URL ""
85
+ ENV OLLAMA_MODEL_LIST ""
85
86
 
86
87
  # Perplexity
87
88
  ENV PERPLEXITY_API_KEY ""
@@ -94,7 +95,7 @@ ENV MISTRAL_API_KEY ""
94
95
 
95
96
  # OpenRouter
96
97
  ENV OPENROUTER_API_KEY ""
97
- ENV OPENROUTER_CUSTOM_MODELS ""
98
+ ENV OPENROUTER_MODEL_LIST ""
98
99
 
99
100
  # 01.AI
100
101
  ENV ZEROONE_API_KEY ""
package/README.md CHANGED
@@ -128,6 +128,7 @@ We have implemented support for the following model service providers:
128
128
  - **Groq**: Accessed Groq's AI models, efficiently processing message sequences and generating responses, capable of multi-turn dialogues and single-interaction tasks. [Learn more](https://groq.com/)
129
129
  - **OpenRouter**: Supports routing of models including **Claude 3**, **Gemma**, **Mistral**, **Llama2** and **Cohere**, with intelligent routing optimization to improve usage efficiency, open and flexible. [Learn more](https://openrouter.ai/)
130
130
  - **01.AI (Yi Model)**: Integrated the 01.AI models, with series of APIs featuring fast inference speed, which not only shortened the processing time, but also maintained excellent model performance. [Learn more](https://01.ai/)
131
+ - **Together.ai**: Over 100 leading open-source Chat, Language, Image, Code, and Embedding models are available through the Together Inference API. For these models you pay just for what you use. [Learn more](https://www.together.ai/)
131
132
 
132
133
  At the same time, we are also planning to support more model service providers, such as Replicate and Perplexity, to further enrich our service provider library. If you would like LobeChat to support your favorite service provider, feel free to join our [community discussion](https://github.com/lobehub/lobe-chat/discussions/1284).
133
134
 
@@ -459,7 +460,7 @@ This project provides some additional configuration items set with environment v
459
460
  | `OPENAI_API_KEY` | Yes | This is the API key you apply on the OpenAI account page | `sk-xxxxxx...xxxxxx` |
460
461
  | `OPENAI_PROXY_URL` | No | If you manually configure the OpenAI interface proxy, you can use this configuration item to override the default OpenAI API request base URL | `https://api.chatanywhere.cn` or `https://aihubmix.com/v1` <br/>The default value is<br/>`https://api.openai.com/v1` |
461
462
  | `ACCESS_CODE` | No | Add a password to access this service; you can set a long password to avoid leaking. If this value contains a comma, it is a password array. | `awCTe)re_r74` or `rtrt_ewee3@09!` or `code1,code2,code3` |
462
- | `CUSTOM_MODELS` | No | Used to control the model list. Use `+` to add a model, `-` to hide a model, and `model_name=display_name` to customize the display name of a model, separated by commas. | `qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` |
463
+ | `OPENAI_MODEL_LIST` | No | Used to control the model list. Use `+` to add a model, `-` to hide a model, and `model_name=display_name` to customize the display name of a model, separated by commas. | `qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` |
463
464
 
464
465
  > \[!NOTE]
465
466
  >
package/README.zh-CN.md CHANGED
@@ -124,6 +124,7 @@
124
124
  - **Anthropic (Claude)**:接入了 Anthropic 的 **Claude** 系列模型,包括 Claude 3 和 Claude 2,多模态突破,超长上下文,树立行业新基准。[了解更多](https://www.anthropic.com/claude)
125
125
  - **ChatGLM**:加入了智谱的 **ChatGLM** 系列模型(GLM-4/GLM-4-vision/GLM-3-turbo),为用户提供了另一种高效的会话模型选择。[了解更多](https://www.zhipuai.cn/)
126
126
  - **Moonshot AI (月之暗面)**:集成了 Moonshot 系列模型,这是一家来自中国的创新性 AI 创业公司,旨在提供更深层次的会话理解。[了解更多](https://www.moonshot.cn/)
127
+ - **Together.ai**:集成部署了数百种开源模型和向量模型,无需本地部署即可随时访问这些模型。[了解更多](https://www.together.ai/)
127
128
  - **01.AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
128
129
  - **Groq**:接入了 Groq 的 AI 模型,高效处理消息序列,生成回应,胜任多轮对话及单次交互任务。[了解更多](https://groq.com/)
129
130
  - **OpenRouter**:其支持包括 **Claude 3**,**Gemma**,**Mistral**,**Llama2**和**Cohere**等模型路由,支持智能路由优化,提升使用效率,开放且灵活。[了解更多](https://openrouter.ai/)
@@ -435,12 +436,12 @@ $ docker run -d -p 3210:3210 \
435
436
 
436
437
  本项目提供了一些额外的配置项,使用环境变量进行设置:
437
438
 
438
- | 环境变量 | 类型 | 描述 | 示例 |
439
- | ------------------ | ---- | ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
440
- | `OPENAI_API_KEY` | 必选 | 这是你在 OpenAI 账户页面申请的 API 密钥 | `sk-xxxxxx...xxxxxx` |
441
- | `OPENAI_PROXY_URL` | 可选 | 如果你手动配置了 OpenAI 接口代理,可以使用此配置项来覆盖默认的 OpenAI API 请求基础 URL | `https://api.chatanywhere.cn` 或 `https://aihubmix.com/v1`<br/>默认值:<br/>`https://api.openai.com/v1` |
442
- | `ACCESS_CODE` | 可选 | 添加访问此服务的密码,你可以设置一个长密码以防被爆破,该值用逗号分隔时为密码数组 | `awCTe)re_r74` or `rtrt_ewee3@09!` or `code1,code2,code3` |
443
- | `CUSTOM_MODELS` | 可选 | 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 | `qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` |
439
+ | 环境变量 | 类型 | 描述 | 示例 |
440
+ | ------------------- | ---- | ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
441
+ | `OPENAI_API_KEY` | 必选 | 这是你在 OpenAI 账户页面申请的 API 密钥 | `sk-xxxxxx...xxxxxx` |
442
+ | `OPENAI_PROXY_URL` | 可选 | 如果你手动配置了 OpenAI 接口代理,可以使用此配置项来覆盖默认的 OpenAI API 请求基础 URL | `https://api.chatanywhere.cn` 或 `https://aihubmix.com/v1`<br/>默认值:<br/>`https://api.openai.com/v1` |
443
+ | `ACCESS_CODE` | 可选 | 添加访问此服务的密码,你可以设置一个长密码以防被爆破,该值用逗号分隔时为密码数组 | `awCTe)re_r74` or `rtrt_ewee3@09!` or `code1,code2,code3` |
444
+ | `OPENAI_MODEL_LIST` | 可选 | 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 | `qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` |
444
445
 
445
446
  > \[!NOTE]
446
447
  >
@@ -24,11 +24,11 @@ LobeChat supports the configuration of external identity verification services f
24
24
 
25
25
  Currently supported identity verification services include:
26
26
 
27
- - [Auth0](/en/self-hosting/advanced/sso-providers/auth0)
28
- - [Microsoft Entra ID](/en/self-hosting/advanced/sso-providers/microsoft-entra-id)
29
- - [Authentik](/en/self-hosting/advanced/sso-providers/authentik)
30
- - [Github](/en/self-hosting/advanced/sso-providers/github)
31
- - [ZITADEL](/en/self-hosting/advanced/sso-providers/zitadel)
27
+ - [Auth0](/en/docs/self-hosting/advanced/sso-providers/auth0)
28
+ - [Microsoft Entra ID](/en/docs/self-hosting/advanced/sso-providers/microsoft-entra-id)
29
+ - [Authentik](/en/docs/self-hosting/advanced/sso-providers/authentik)
30
+ - [Github](/en/docs/self-hosting/advanced/sso-providers/github)
31
+ - [ZITADEL](/en/docs/self-hosting/advanced/sso-providers/zitadel)
32
32
 
33
33
  Click on the links to view the corresponding platform's configuration documentation.
34
34
 
@@ -48,6 +48,4 @@ The order corresponds to the display order of the SSO providers.
48
48
 
49
49
  ## Other SSO Providers
50
50
 
51
- Please refer to the [NextAuth.js][next-auth-js] documentation and feel free to submit a Pull Request.
52
-
53
- [next-auth-js]: https://next-auth.js.org/providers
51
+ Please refer to the [NextAuth.js](https://next-auth.js.org/providers) documentation and feel free to submit a Pull Request.
@@ -20,11 +20,11 @@ LobeChat 支持配置外部身份验证服务,供企业 / 组织内部使用
20
20
 
21
21
  目前支持的身份验证服务有:
22
22
 
23
- - [Auth0](/zh/self-hosting/advanced/sso-providers/auth0)
24
- - [Microsoft Entra ID](/zh/self-hosting/advanced/sso-providers/microsoft-entra-id)
25
- - [Authentik](/zh/self-hosting/advanced/sso-providers/authentik)
26
- - [Github](/zh/self-hosting/advanced/sso-providers/github)
27
- - [ZITADEL](/zh/self-hosting/advanced/sso-providers/zitadel)
23
+ - [Auth0](/zh/docs/self-hosting/advanced/sso-providers/auth0)
24
+ - [Microsoft Entra ID](/zh/docs/self-hosting/advanced/sso-providers/microsoft-entra-id)
25
+ - [Authentik](/zh/docs/self-hosting/advanced/sso-providers/authentik)
26
+ - [Github](/zh/docs/self-hosting/advanced/sso-providers/github)
27
+ - [ZITADEL](/zh/docs/self-hosting/advanced/sso-providers/zitadel)
28
28
 
29
29
  点击链接可以查看对应平台的配置文档。
30
30
 
@@ -44,6 +44,4 @@ LobeChat 支持配置外部身份验证服务,供企业 / 组织内部使用
44
44
 
45
45
  ## 其他 SSO 提供商
46
46
 
47
- 请参考 [NextAuth.js][next-auth-js] 文档,欢迎提交 Pull Request。
48
-
49
- [next-auth-js]: https://next-auth.js.org/providers
47
+ 请参考 [NextAuth.js](https://next-auth.js.org/providers) 文档,欢迎提交 Pull Request。
@@ -14,7 +14,7 @@ tags:
14
14
  - Configuration
15
15
  ---
16
16
 
17
- # Data Statistics
17
+ # Data Analytics
18
18
 
19
19
  We have integrated several free/open-source data analytics services in LobeChat for collecting user usage data. Here are environment variables that you can use.
20
20
 
@@ -1,8 +1,6 @@
1
1
  ---
2
2
  title: 在 LobeChat 中 配置数据统计服务环境变量指南
3
- description: >-
4
- 了解如何在 LobeChat 中配置各种数据统计服务的环境变量,包括Vercel Analytics、Google Analytics、Posthog
5
- Analytics和Umami Analytics。
3
+ description: 了解如何在 LobeChat 中配置各种数据统计服务的环境变量,包括Vercel Analytics、Google Analytics 等。
6
4
  tags:
7
5
  - 数据统计
8
6
  - 环境变量
@@ -2,30 +2,14 @@
2
2
  title: LobeChat Model Service Providers - Environment Variables and Configuration
3
3
  description: >-
4
4
  Learn about the environment variables and configuration settings for various
5
- model service providers like OpenAI, Azure OpenAI, ZHIPU AI, Moonshot AI,
6
- Google AI, AWS Bedrock, Ollama, Perplexity AI, Anthropic AI, Mistral AI, Groq
7
- AI, OpenRouter AI, 01.AI, and TogetherAI.
5
+ model service providers like OpenAI, Google AI, AWS Bedrock, Ollama,
6
+ Perplexity AI, Anthropic AI, Mistral AI, Groq AI, OpenRouter AI, and 01.AI.
8
7
  tags:
9
8
  - Model Service Providers
10
9
  - Environment Variables
11
- - Configuration
12
- - OpenAI
13
- - Azure OpenAI
14
- - ZHIPU AI
15
- - Moonshot AI
16
- - Google AI
17
- - AWS Bedrock
18
- - Ollama
19
- - Perplexity AI
20
- - Anthropic AI
21
- - Mistral AI
22
- - Groq AI
23
- - OpenRouter AI
24
- - 01.AI
25
- - TogetherAI
26
10
  ---
27
11
 
28
- # Model Service Providers
12
+ # Model Providers
29
13
 
30
14
  When deploying LobeChat, a rich set of environment variables related to model service providers is provided, allowing you to easily define the model service providers to be enabled in LobeChat.
31
15
 
@@ -64,7 +48,7 @@ Related discussions:
64
48
  - [Reasons for errors when using third-party interfaces](https://github.com/lobehub/lobe-chat/discussions/734)
65
49
  - [No response in chat after filling in the proxy server address](https://github.com/lobehub/lobe-chat/discussions/1065)
66
50
 
67
- ### `CUSTOM_MODELS`
51
+ ### `OPENAI_MODEL_LIST`
68
52
 
69
53
  - Type: Optional
70
54
  - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas.
@@ -138,6 +122,13 @@ When using the `turn` mode, the API Keys will be retrieved in a round-robin mann
138
122
  - Default: -
139
123
  - Example: `AIraDyDwcw254kwJaGjI9wwaHcdDCS__Vt3xQE`
140
124
 
125
+ ### `GOOGLE_PROXY_URL`
126
+
127
+ - Type: Optional
128
+ - Description: If you manually configure the Google interface proxy, you can use this configuration item to override the default Google API request base URL
129
+ - Default: `https://generativelanguage.googleapis.com`
130
+ - Example: `https://api.genai.gd.edu.kg/google`
131
+
141
132
  ## AWS Bedrock
142
133
 
143
134
  ### `AWS_ACCESS_KEY_ID`
@@ -170,6 +161,13 @@ When using the `turn` mode, the API Keys will be retrieved in a round-robin mann
170
161
  - Default: -
171
162
  - Example: `http://127.0.0.1:11434/v1`
172
163
 
164
+ ### `OLLAMA_MODEL_LIST`
165
+
166
+ - Type: Optional
167
+ - Description: Used to customize your own language models
168
+ - Default: -
169
+ - Example: `llama2:7B`
170
+
173
171
  ## Perplexity AI
174
172
 
175
173
  ### `PERPLEXITY_API_KEY`
@@ -215,7 +213,7 @@ When using the `turn` mode, the API Keys will be retrieved in a round-robin mann
215
213
  - Default: -
216
214
  - Example: `sk-or-v1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=`
217
215
 
218
- ### `OPENROUTER_CUSTOM_MODELS`
216
+ ### `OPENROUTER_MODEL_LIST`
219
217
 
220
218
  - Type: Optional
221
219
  - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas.
@@ -46,7 +46,7 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
46
46
  - [使用第三方接口报错的原因](https://github.com/lobehub/lobe-chat/discussions/734)
47
47
  - [代理服务器地址填了聊天没任何反应](https://github.com/lobehub/lobe-chat/discussions/1065)
48
48
 
49
- ### `CUSTOM_MODELS`
49
+ ### `OPENAI_MODEL_LIST`
50
50
 
51
51
  - 类型:可选
52
52
  - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。
@@ -120,6 +120,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
120
120
  - 默认值:-
121
121
  - 示例:`AIraDyDwcw254kwJaGjI9wwaHcdDCS__Vt3xQE`
122
122
 
123
+ ### `GOOGLE_PROXY_URL`
124
+
125
+ - 类型:可选
126
+ - 描述:如果你手动配置了 Google 接口代理,可以使用此配置项来覆盖默认的 Google API 请求基础 URL
127
+ - 默认值:`https://generativelanguage.googleapis.com`
128
+ - 示例:`https://api.genai.gd.edu.kg/google`
129
+
123
130
  ## AWS Bedrock
124
131
 
125
132
  ### `AWS_ACCESS_KEY_ID`
@@ -152,6 +159,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
152
159
  - 默认值:-
153
160
  - 示例:`http://127.0.0.1:11434/v1`
154
161
 
162
+ ### `OLLAMA_MODEL_LIST`
163
+
164
+ - 类型:可选
165
+ - 描述:用于指定自定义 Ollama 语言模型
166
+ - 默认值:-
167
+ - 示例:`qwen:32B`
168
+
155
169
  ## Perplexity AI
156
170
 
157
171
  ### `PERPLEXITY_API_KEY`
@@ -197,7 +211,7 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
197
211
  - 默认值:-
198
212
  - 示例:`sk-or-v1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=`
199
213
 
200
- ### `OPENROUTER_CUSTOM_MODELS`
214
+ ### `OPENROUTER_MODEL_LIST`
201
215
 
202
216
  - 类型:可选
203
217
  - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。
@@ -16,6 +16,7 @@ LobeChat provides some additional configuration options when deployed, which can
16
16
 
17
17
  <Cards>
18
18
  <Card href={'environment-variables/basic'} title={'Basic Environment Variables'} />
19
-
20
19
  <Card href={'environment-variables/model-provider'} title={'Model Service Providers'} />
20
+ <Card href={'environment-variables/auth'} title={'Authentication'} />
21
+ <Card href={'environment-variables/analytics'} title={'Data Analytics'} />
21
22
  </Cards>
@@ -14,6 +14,7 @@ LobeChat 在部署时提供了一些额外的配置项,你可以使用环境
14
14
 
15
15
  <Cards>
16
16
  <Cards href={'environment-variables/basic'} title={'基础环境变量'} />
17
-
18
17
  <Cards href={'environment-variables/model-provider'} title={'模型服务商'} />
18
+ <Cards href={'environment-variables/auth'} title={'身份验证'} />
19
+ <Cards href={'environment-variables/analytics'} title={'数据统计'} />
19
20
  </Cards>
@@ -1,8 +1,6 @@
1
1
  ---
2
2
  title: Configuring Azure OpenAI for LobeChat
3
- description: >-
4
- Learn how to configure Azure OpenAI for LobeChat, including usage limitations,
5
- interface configuration, and deployment settings.
3
+ description: Learn how to configure Azure OpenAI for LobeChat, including interface configuration, and deployment settings.
6
4
  tags:
7
5
  - Azure OpenAI
8
6
  - LobeChat
@@ -13,15 +11,7 @@ tags:
13
11
 
14
12
  # Integrating with Azure OpenAI
15
13
 
16
- LobeChat supports using [Azure OpenAI][azure-openai-url] as the model service provider for OpenAI. This article will explain how to configure Azure OpenAI.
17
-
18
- ## Usage Limitations
19
-
20
- Due to development costs ([#178][rfc]), the current version of LobeChat does not fully comply with the implementation model of Azure OpenAI. Instead, it adopts a solution based on `openai` to be compatible with Azure OpenAI. As a result, the following limitations exist:
21
-
22
- - Only one of OpenAI and Azure OpenAI can be selected. Once you enable Azure OpenAI, you will not be able to use OpenAI as the model service provider.
23
- - LobeChat requires the deployment name to be the same as the model name in order to function properly. For example, the deployment name for the `gpt-35-turbo` model must be `gpt-35-turbo`. Otherwise, LobeChat will not be able to match the corresponding model correctly. <Image alt="Usage Limitations" src="https://github-production-user-asset-6210df.s3.amazonaws.com/28616219/267082091-d89d53d3-1c8c-40ca-ba15-0a9af2a79264.png" />
24
- - Due to the complexity of integrating with Azure OpenAI's SDK, it is currently not possible to query the list of configured models.
14
+ LobeChat supports using [Azure OpenAI](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models) as the model service provider for OpenAI. This article will explain how to configure Azure OpenAI.
25
15
 
26
16
  ## Configuring in the Interface
27
17
 
@@ -36,7 +26,7 @@ You can fill in the corresponding configuration items as needed:
36
26
 
37
27
  - **API Key**: The API key you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section.
38
28
  - **API Address**: Azure API address, which can be found in the "Keys and Endpoints" section when checking resources in the Azure portal.
39
- - **Azure API Version**: The API version of Azure, following the format YYYY-MM-DD. Refer to the [latest version][azure-api-verion-url].
29
+ - **Azure API Version**: The API version of Azure, following the format YYYY-MM-DD. Refer to the [latest version](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions).
40
30
 
41
31
  After completing the configuration of the above fields, click "Check". If it prompts "Check passed", it means the configuration was successful.
42
32
 
@@ -46,17 +36,7 @@ If you want the deployed version to be pre-configured with Azure OpenAI for end
46
36
 
47
37
  | Environment Variable | Type | Description | Default Value | Example |
48
38
  | --- | --- | --- | --- | --- |
49
- | `USE_AZURE_OPENAI` | Required | Set this value to `1` to enable Azure OpenAI configuration | - | `1` |
50
39
  | `AZURE_API_KEY` | Required | This is the API key you obtained from the Azure OpenAI account page | - | `c55168be3874490ef0565d9779ecd5a6` |
51
- | `OPENAI_PROXY_URL` | Required | Azure API address, can be found in the "Keys and Endpoints" section when checking resources in the Azure portal | - | `https://docs-test-001.openai.azure.com` |
52
- | `AZURE_API_VERSION` | Optional | Azure API version, following the format YYYY-MM-DD | 2023-08-01-preview | `2023-05-15`, see [latest version][azure-api-verion-url] |
53
- | `ACCESS_CODE` | Optional | Add a password to access this service. You can set a long password to prevent brute force attacks. When this value is separated by commas, it becomes an array of passwords | - | `awCT74` or `e3@09!` or `code1,code2,code3` |
54
-
55
- <Callout>
56
- When you enable `USE_AZURE_OPENAI` on the server, users will be unable to modify and use the
57
- OpenAI API key in the frontend configuration.
58
- </Callout>
59
-
60
- [azure-api-verion-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions
61
- [azure-openai-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models
62
- [rfc]: https://github.com/lobehub/lobe-chat/discussions/178
40
+ | `AZURE_ENDPOINT` | Required | Azure API address, can be found in the "Keys and Endpoints" section when checking resources in the Azure portal | - | `https://docs-test-001.openai.azure.com` |
41
+ | `AZURE_API_VERSION` | Optional | Azure API version, following the format YYYY-MM-DD | 2023-08-01-preview | `-`, see [latest version](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions) |
42
+ | `ACCESS_CODE` | Optional | Add a password to access LobeChat. You can set a long password to prevent brute force attacks. When this value is separated by commas, it becomes an array of passwords | - | `awCT74` or `e3@09!` or `code1,code2,code3` |
@@ -1,6 +1,6 @@
1
1
  ---
2
2
  title: 在 LobeChat 中集成 Azure OpenAI
3
- description: 了解如何在 LobeChat 中配置 Azure OpenAI 以及使用限制。从界面配置到部署时的环境变量设置,一步步指导。
3
+ description: 了解如何在 LobeChat 中配置 Azure OpenAI。一步步指导从界面配置到部署时的环境变量设置。
4
4
  tags:
5
5
  - Azure OpenAI
6
6
  - 配置指南
@@ -13,15 +13,7 @@ tags:
13
13
 
14
14
  # 与 Azure OpenAI 集成使用
15
15
 
16
- LobeChat 支持使用 [Azure OpenAI][azure-openai-url] 作为 OpenAI 的模型服务商,本文将介绍如何配置 Azure OpenAI。
17
-
18
- ## 使用限制
19
-
20
- 从研发成本考虑 ([#178][rfc]),目前阶段的 LobeChat 并没有 100% 完全符合 Azure OpenAI 的实现模型,采用了以 `openai` 为基座,兼容 Azure OpeAI 的解决方案。因此会带来以下局限性:
21
-
22
- - OpenAI 与 Azure OpenAI 只能二选一,当你开启使用 Azure OpenAI 后,将无法使用 OpenAI 作为模型服务商;
23
- - LobeChat 约定了与模型同名的部署名才能正常使用,比如 `gpt-35-turbo` 模型的部署名,必须为 `gpt-35-turbo`,否则 LobeChat 将无法正常正确匹配到相应模型 <Image alt="使用限制" src="https://github-production-user-asset-6210df.s3.amazonaws.com/28616219/267082091-d89d53d3-1c8c-40ca-ba15-0a9af2a79264.png" />
24
- - 由于 Azure OpenAI 的 SDK 接入复杂度,当前无法查询配置资源的模型列表;
16
+ LobeChat 支持使用 [Azure OpenAI](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models) 作为 OpenAI 的模型服务商,本文将介绍如何配置 Azure OpenAI。
25
17
 
26
18
  ## 在界面中配置
27
19
 
@@ -36,7 +28,7 @@ LobeChat 支持使用 [Azure OpenAI][azure-openai-url] 作为 OpenAI 的模型
36
28
 
37
29
  - **APIKey**:你在 Azure OpenAI 账户页面申请的 API 密钥,可在 “密钥和终结点” 部分中找到此值
38
30
  - **API 地址**:Azure API 地址,从 Azure 门户检查资源时,可在 “密钥和终结点” 部分中找到此值
39
- - **Azure Api Version**: Azure 的 API 版本,遵循 YYYY-MM-DD 格式,查阅[最新版本][azure-api-verion-url]
31
+ - **Azure Api Version**: Azure 的 API 版本,遵循 YYYY-MM-DD 格式,查阅[最新版本](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions)
40
32
 
41
33
  完成上述字段配置后,点击「检查」,如果提示「检查通过」,则说明配置成功。
42
34
 
@@ -48,16 +40,7 @@ LobeChat 支持使用 [Azure OpenAI][azure-openai-url] 作为 OpenAI 的模型
48
40
 
49
41
  | 环境变量 | 类型 | 描述 | 默认值 | 示例 |
50
42
  | --- | --- | --- | --- | --- |
51
- | `USE_AZURE_OPENAI` | 必选 | 设置改值为 `1` 开启 Azure OpenAI 配置 | - | `1` |
52
43
  | `AZURE_API_KEY` | 必选 | 这是你在 Azure OpenAI 账户页面申请的 API 密钥 | - | `c55168be3874490ef0565d9779ecd5a6` |
53
- | `OPENAI_PROXY_URL` | 必选 | Azure API 地址,从 Azure 门户检查资源时,可在 “密钥和终结点” 部分中找到此值 | - | `https://docs-test-001.openai.azure.com` |
54
- | `AZURE_API_VERSION` | 可选 | Azure 的 API 版本,遵循 YYYY-MM-DD 格式 | 2023-08-01-preview | `2023-05-15`,查阅[最新版本][azure-api-verion-url] |
55
- | `ACCESS_CODE` | 可选 | 添加访问此服务的密码,你可以设置一个长密码以防被爆破,该值用逗号分隔时为密码数组 | - | `awCT74` 或 `e3@09!` or `code1,code2,code3` |
56
-
57
- <Callout>
58
- 当你在服务端开启 `USE_AZURE_OPENAI` 后,用户将无法在前端配置中修改并使用 OpenAI API key。
59
- </Callout>
60
-
61
- [azure-api-verion-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions
62
- [azure-openai-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models
63
- [rfc]: https://github.com/lobehub/lobe-chat/discussions/178
44
+ | `AZURE_ENDPOINT` | 必选 | Azure API 地址,从 Azure 门户检查资源时,可在 “密钥和终结点” 部分中找到此值 | - | `https://docs-test-001.openai.azure.com` |
45
+ | `AZURE_API_VERSION` | 可选 | Azure 的 API 版本,遵循 YYYY-MM-DD 格式 | 2023-08-01-preview | `-`,查阅[最新版本](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions) |
46
+ | `ACCESS_CODE` | 可选 | 添加访问 LobeChat 的密码,你可以设置一个长密码以防被爆破,该值用逗号分隔时为密码数组 | - | `awCT74` 或 `e3@09!` 或 `code1,code2,code3` |
@@ -32,4 +32,13 @@ docker run -d -p 3210:3210 -e OLLAMA_PROXY_URL=http://host.docker.internal:11434
32
32
 
33
33
  Now, you can use LobeChat to converse with the local LLM.
34
34
 
35
- For more information on using Ollama in LobeChat, please refer to [Ollama Usage](/en/usage/providers/ollama).
35
+ For more information on using Ollama in LobeChat, please refer to [Ollama Usage](/docs/usage/providers/ollama).
36
+
37
+ ## Accessing Ollama from Non-Local Locations
38
+
39
+ When you first start Ollama, it is configured to allow access only from the local machine. To enable access from other origins and configure port listening, you will need to adjust the environment variables accordingly.
40
+ ```
41
+ set OLLAMA_ORIGINS=*
42
+ set OLLAMA_HOST=0.0.0.0:11434
43
+ ```
44
+ For further guidance on configuration, consult the [Ollama Official Documentation](https://ollama.com/docs/configuration).
@@ -30,4 +30,13 @@ docker run -d -p 3210:3210 -e OLLAMA_PROXY_URL=http://host.docker.internal:11434
30
30
 
31
31
  接下来,你就可以使用 LobeChat 与本地 LLM 对话了。
32
32
 
33
- 关于在 LobeChat 中使用 Ollama 的更多信息,请查阅 [Ollama 使用](/zh/usage/providers/ollama)。
33
+ 关于在 LobeChat 中使用 Ollama 的更多信息,请查阅 [Ollama 使用](/zh/docs/usage/providers/ollama)。
34
+
35
+ ## 非本地访问 Ollama
36
+
37
+ 由于 Ollama 默认参数在启动时仅设置了本地访问,所以跨域访问以及端口监听需要进行额外的环境变量设置。
38
+ ```
39
+ set OLLAMA_ORIGINS=*
40
+ set OLLAMA_HOST=0.0.0.0:11434
41
+ ```
42
+ 详细配置方法可以参考 [Ollama 官方文档](https://ollama.com/docs/configuration)。
@@ -34,7 +34,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat
34
34
  ```fish
35
35
  $ apt install docker.io
36
36
  ```
37
-
37
+
38
38
  </Tab>
39
39
 
40
40
  <Tab>
@@ -43,7 +43,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat
43
43
  ```
44
44
 
45
45
  </Tab>
46
-
46
+
47
47
  </Tabs>
48
48
 
49
49
  ### Docker Command Deployment
@@ -106,7 +106,7 @@ First, create a `lobe.env` configuration file with various environment variables
106
106
  OPENAI_API_KEY=sk-xxxx
107
107
  OPENAI_PROXY_URL=https://api-proxy.com/v1
108
108
  ACCESS_CODE=arthals2333
109
- CUSTOM_MODELS=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
109
+ OPENAI_MODEL_LIST=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
110
110
  ```
111
111
 
112
112
  Then, you can use the following script to automate the update:
@@ -33,7 +33,7 @@ tags:
33
33
  ```fish
34
34
  $ apt install docker.io
35
35
  ```
36
-
36
+
37
37
  </Tab>
38
38
 
39
39
  <Tab>
@@ -42,7 +42,7 @@ tags:
42
42
  ```
43
43
 
44
44
  </Tab>
45
-
45
+
46
46
  </Tabs>
47
47
 
48
48
  ### Docker 指令部署
@@ -104,7 +104,7 @@ $ docker run -d -p 3210:3210 \
104
104
  OPENAI_API_KEY=sk-xxxx
105
105
  OPENAI_PROXY_URL=https://api-proxy.com/v1
106
106
  ACCESS_CODE=arthals2333
107
- CUSTOM_MODELS=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
107
+ OPENAI_MODEL_LIST=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
108
108
  ```
109
109
 
110
110
  然后,你可以使用以下脚本来自动更新:
@@ -11,6 +11,7 @@ tags:
11
11
  - Google AI Gemini
12
12
  - ChatGLM
13
13
  - Moonshot AI
14
+ - Together AI
14
15
  - local model support
15
16
  - Ollama
16
17
  ---
@@ -35,9 +36,14 @@ In this way, LobeChat can more flexibly adapt to the needs of different users, w
35
36
  We have implemented support for the following model service providers:
36
37
 
37
38
  - **AWS Bedrock**: Integrated with AWS Bedrock service, supporting models such as **Claude / LLama2**, providing powerful natural language processing capabilities. [Learn more](https://aws.amazon.com/cn/bedrock)
39
+ - **Anthropic (Claude)**: Accessed Anthropic's **Claude** series models, including Claude 3 and Claude 2, with breakthroughs in multi-modal capabilities and extended context, setting a new industry benchmark. [Learn more](https://www.anthropic.com/claude)
38
40
  - **Google AI (Gemini Pro, Gemini Vision)**: Access to Google's **Gemini** series models, including Gemini and Gemini Pro, to support advanced language understanding and generation. [Learn more](https://deepmind.google/technologies/gemini/)
39
41
  - **ChatGLM**: Added the **ChatGLM** series models from Zhipuai (GLM-4/GLM-4-vision/GLM-3-turbo), providing users with another efficient conversation model choice. [Learn more](https://www.zhipuai.cn/)
40
42
  - **Moonshot AI (Dark Side of the Moon)**: Integrated with the Moonshot series models, an innovative AI startup from China, aiming to provide deeper conversation understanding. [Learn more](https://www.moonshot.cn/)
43
+ - **Groq**: Integrated Groq's AI models, which efficiently process message sequences and generate responses, supporting both multi-turn dialogues and single-interaction tasks. [Learn more](https://groq.com/)
44
+ - **OpenRouter**: Supports routing of models including **Claude 3**, **Gemma**, **Mistral**, **Llama2** and **Cohere**, with intelligent routing optimization to improve usage efficiency, open and flexible. [Learn more](https://openrouter.ai/)
45
+ - **01.AI (Yi Model)**: Integrated the 01.AI models, whose series of APIs feature fast inference speed, which not only shortens processing time but also maintains excellent model performance. [Learn more](https://01.ai/)
46
+ - **Together.ai**: Over 100 leading open-source Chat, Language, Image, Code, and Embedding models are available through the Together Inference API. For these models you pay just for what you use. [Learn more](https://www.together.ai/)
41
47
 
42
48
  At the same time, we are also planning to support more model service providers, such as Replicate and Perplexity, to further enrich our service provider library. If you would like LobeChat to support your favorite service provider, feel free to join our [community discussion](https://github.com/lobehub/lobe-chat/discussions/1284).
43
49
 
@@ -12,6 +12,7 @@ tags:
12
12
  - ChatGLM
13
13
  - Moonshot AI
14
14
  - 01 AI
15
+ - Together AI
15
16
  - Ollama
16
17
  ---
17
18
 
@@ -36,9 +37,13 @@ tags:
36
37
 
37
38
  - **AWS Bedrock**:集成了 AWS Bedrock 服务,支持了 **Claude / LLama2** 等模型,提供了强大的自然语言处理能力。[了解更多](https://aws.amazon.com/cn/bedrock)
38
39
  - **Google AI (Gemini Pro、Gemini Vision)**:接入了 Google 的 **Gemini** 系列模型,包括 Gemini 和 Gemini Pro,以支持更高级的语言理解和生成。[了解更多](https://deepmind.google/technologies/gemini/)
40
+ - **Anthropic (Claude)**:接入了 Anthropic 的 **Claude** 系列模型,包括 Claude 3 和 Claude 2,多模态突破,超长上下文,树立行业新基准。[了解更多](https://www.anthropic.com/claude)
39
41
  - **ChatGLM**:加入了智谱的 **ChatGLM** 系列模型(GLM-4/GLM-4-vision/GLM-3-turbo),为用户提供了另一种高效的会话模型选择。[了解更多](https://www.zhipuai.cn/)
40
42
  - **Moonshot AI (月之暗面)**:集成了 Moonshot 系列模型,这是一家来自中国的创新性 AI 创业公司,旨在提供更深层次的会话理解。[了解更多](https://www.moonshot.cn/)
41
- - **01 AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
43
+ - **Together.ai**:集成部署了数百种开源模型和向量模型,无需本地部署即可随时访问这些模型。[了解更多](https://www.together.ai/)
44
+ - **01.AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
45
+ - **Groq**:接入了 Groq 的 AI 模型,高效处理消息序列,生成回应,胜任多轮对话及单次交互任务。[了解更多](https://groq.com/)
46
+ - **OpenRouter**:其支持包括 **Claude 3**,**Gemma**,**Mistral**,**Llama2** 和 **Cohere** 等模型路由,支持智能路由优化,提升使用效率,开放且灵活。[了解更多](https://openrouter.ai/)
42
47
 
43
48
  同时,我们也在计划支持更多的模型服务商,如 Replicate 和 Perplexity 等,以进一步丰富我们的服务商库。如果你希望让 LobeChat 支持你喜爱的服务商,欢迎加入我们的[社区讨论](https://github.com/lobehub/lobe-chat/discussions/1284)。
44
49
 
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "بالإضافة إلى العنوان الافتراضي، يجب أن يتضمن http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "عنوان وكيل API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "أدخل مفتاح API الخاص بـ Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Изисква се адрес, включително http(s)://, освен ако не е по подразбиране",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Адрес на API прокси"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Въведете API Key, получен от Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Geben Sie Ihren API-Key von Google ein",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Must include http(s):// besides the default address",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API Proxy Address"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Enter the API Key from Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Aparte de la dirección predeterminada, debe incluir http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Dirección del proxy de la API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Introduce la clave API proporcionada por Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Incluez http(s):// en plus de l'adresse par défaut",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Adresse du proxy API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Saisissez la clé API de Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Indirizzo dell'API Proxy"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Inserisci la chiave API da Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "デフォルトのアドレスに加えて、http(s)://を含める必要があります",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "APIプロキシアドレス"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google の API Key を入力してください",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "기본 주소 이외에 http(s)://를 포함해야 합니다.",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 프록시 주소"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google에서 제공하는 API 키를 입력하세요.",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Voer de API Key van Google in",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Wprowadź klucz API uzyskany od Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Além do endereço padrão, deve incluir http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Endereço do Proxy da API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Insira sua API Key fornecida pelo Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Помимо адреса по умолчанию, должен включать http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Адрес прокси-API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Введите свой API Key от Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google'dan gelen API Key'i girin",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Ngoài địa chỉ mặc định, phải bao gồm http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Địa chỉ Proxy API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Nhập API Key từ Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "填入来自 Google 的 API Key",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除了預設地址外,必須包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "填入來自 Google 的 API 金鑰",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "0.147.2",
3
+ "version": "0.147.3",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -86,7 +86,7 @@
86
86
  "@aws-sdk/client-bedrock-runtime": "^3.549.0",
87
87
  "@azure/openai": "^1.0.0-beta.12",
88
88
  "@cfworker/json-schema": "^1.12.8",
89
- "@google/generative-ai": "^0.3.1",
89
+ "@google/generative-ai": "^0.5.0",
90
90
  "@icons-pack/react-simple-icons": "^9.4.0",
91
91
  "@lobehub/chat-plugin-sdk": "latest",
92
92
  "@lobehub/chat-plugins-gateway": "latest",
@@ -224,10 +224,11 @@ class AgentRuntime {
224
224
  }
225
225
 
226
226
  private static initGoogle(payload: JWTPayload) {
227
- const { GOOGLE_API_KEY } = getServerConfig();
227
+ const { GOOGLE_API_KEY, GOOGLE_PROXY_URL } = getServerConfig();
228
228
  const apiKey = apiKeyManager.pick(payload?.apiKey || GOOGLE_API_KEY);
229
+ const baseURL = payload?.endpoint || GOOGLE_PROXY_URL;
229
230
 
230
- return new LobeGoogleAI({ apiKey });
231
+ return new LobeGoogleAI({ apiKey, baseURL });
231
232
  }
232
233
 
233
234
  private static initBedrock(payload: JWTPayload) {
@@ -12,6 +12,7 @@ const GoogleProvider = memo(() => {
12
12
  <ProviderConfig
13
13
  checkModel={'gemini-pro'}
14
14
  provider={ModelProvider.Google}
15
+ showEndpoint
15
16
  title={
16
17
  <Flexbox align={'center'} gap={8} horizontal>
17
18
  <Google.BrandColor size={28} />
@@ -28,6 +28,7 @@ declare global {
28
28
  // Google Provider
29
29
  ENABLED_GOOGLE?: string;
30
30
  GOOGLE_API_KEY?: string;
31
+ GOOGLE_PROXY_URL?: string;
31
32
 
32
33
  // Moonshot Provider
33
34
  ENABLED_MOONSHOT?: string;
@@ -154,6 +155,7 @@ export const getProviderConfig = () => {
154
155
 
155
156
  ENABLED_GOOGLE: !!GOOGLE_API_KEY,
156
157
  GOOGLE_API_KEY,
158
+ GOOGLE_PROXY_URL: process.env.GOOGLE_PROXY_URL,
157
159
 
158
160
  ENABLED_PERPLEXITY: !!PERPLEXITY_API_KEY,
159
161
  PERPLEXITY_API_KEY,
@@ -27,11 +27,13 @@ enum HarmBlockThreshold {
27
27
 
28
28
  export class LobeGoogleAI implements LobeRuntimeAI {
29
29
  private client: GoogleGenerativeAI;
30
+ baseURL?: string;
30
31
 
31
- constructor({ apiKey }: { apiKey?: string }) {
32
+ constructor({ apiKey, baseURL }: { apiKey?: string; baseURL?: string }) {
32
33
  if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidGoogleAPIKey);
33
34
 
34
35
  this.client = new GoogleGenerativeAI(apiKey);
36
+ this.baseURL = baseURL;
35
37
  }
36
38
 
37
39
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
@@ -70,7 +72,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
70
72
  },
71
73
  ],
72
74
  },
73
- { apiVersion: 'v1beta' },
75
+ { apiVersion: 'v1beta', baseUrl: this.baseURL },
74
76
  )
75
77
  .generateContentStream({ contents });
76
78
 
@@ -69,6 +69,11 @@ export default {
69
69
  },
70
70
  },
71
71
  google: {
72
+ endpoint: {
73
+ desc: '除默认地址外,必须包含 http(s)://',
74
+ placeholder: 'https://generativelanguage.googleapis.com',
75
+ title: 'API 代理地址',
76
+ },
72
77
  title: 'Google',
73
78
  token: {
74
79
  desc: '填入来自 Google 的 API Key',