@lobehub/chat 0.147.2 → 0.147.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72) hide show
  1. package/.env.example +47 -56
  2. package/CHANGELOG.md +58 -0
  3. package/Dockerfile +3 -2
  4. package/README.md +2 -1
  5. package/README.zh-CN.md +7 -6
  6. package/docs/self-hosting/advanced/authentication.mdx +6 -8
  7. package/docs/self-hosting/advanced/authentication.zh-CN.mdx +6 -8
  8. package/docs/self-hosting/advanced/sso-providers/auth0.mdx +2 -2
  9. package/docs/self-hosting/advanced/sso-providers/auth0.zh-CN.mdx +2 -2
  10. package/docs/self-hosting/advanced/sso-providers/authentik.mdx +2 -2
  11. package/docs/self-hosting/advanced/sso-providers/authentik.zh-CN.mdx +2 -2
  12. package/docs/self-hosting/advanced/sso-providers/github.mdx +1 -1
  13. package/docs/self-hosting/advanced/sso-providers/github.zh-CN.mdx +2 -2
  14. package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.mdx +1 -1
  15. package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.zh-CN.mdx +2 -2
  16. package/docs/self-hosting/advanced/sso-providers/zitadel.mdx +2 -2
  17. package/docs/self-hosting/advanced/sso-providers/zitadel.zh-CN.mdx +2 -2
  18. package/docs/self-hosting/environment-variables/analytics.mdx +1 -1
  19. package/docs/self-hosting/environment-variables/analytics.zh-CN.mdx +1 -3
  20. package/docs/self-hosting/environment-variables/model-provider.mdx +19 -21
  21. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +16 -2
  22. package/docs/self-hosting/environment-variables.mdx +2 -1
  23. package/docs/self-hosting/environment-variables.zh-CN.mdx +2 -1
  24. package/docs/self-hosting/examples/azure-openai.mdx +6 -26
  25. package/docs/self-hosting/examples/azure-openai.zh-CN.mdx +6 -23
  26. package/docs/self-hosting/examples/ollama.mdx +12 -1
  27. package/docs/self-hosting/examples/ollama.zh-CN.mdx +10 -1
  28. package/docs/self-hosting/platform/docker.mdx +4 -4
  29. package/docs/self-hosting/platform/docker.zh-CN.mdx +4 -4
  30. package/docs/self-hosting/platform/netlify.mdx +1 -1
  31. package/docs/self-hosting/platform/netlify.zh-CN.mdx +1 -1
  32. package/docs/self-hosting/platform/vercel.mdx +1 -1
  33. package/docs/self-hosting/platform/vercel.zh-CN.mdx +1 -1
  34. package/docs/usage/agents/custom-agent.mdx +2 -2
  35. package/docs/usage/agents/custom-agent.zh-CN.mdx +2 -2
  36. package/docs/usage/features/multi-ai-providers.mdx +7 -1
  37. package/docs/usage/features/multi-ai-providers.zh-CN.mdx +7 -2
  38. package/docs/usage/features/plugin-system.mdx +1 -1
  39. package/docs/usage/features/plugin-system.zh-CN.mdx +2 -2
  40. package/docs/usage/plugins/custom-plugin.mdx +1 -1
  41. package/docs/usage/plugins/custom-plugin.zh-CN.mdx +1 -1
  42. package/docs/usage/providers/ollama/gemma.mdx +3 -3
  43. package/docs/usage/providers/ollama/gemma.zh-CN.mdx +3 -3
  44. package/docs/usage/providers/ollama/qwen.mdx +3 -3
  45. package/docs/usage/providers/ollama/qwen.zh-CN.mdx +3 -3
  46. package/docs/usage/providers/ollama.mdx +3 -3
  47. package/docs/usage/providers/ollama.zh-CN.mdx +3 -3
  48. package/locales/ar/modelProvider.json +5 -0
  49. package/locales/bg-BG/modelProvider.json +5 -0
  50. package/locales/de-DE/modelProvider.json +5 -0
  51. package/locales/en-US/modelProvider.json +5 -0
  52. package/locales/es-ES/modelProvider.json +5 -0
  53. package/locales/fr-FR/modelProvider.json +5 -0
  54. package/locales/it-IT/modelProvider.json +5 -0
  55. package/locales/ja-JP/modelProvider.json +5 -0
  56. package/locales/ko-KR/modelProvider.json +5 -0
  57. package/locales/nl-NL/modelProvider.json +5 -0
  58. package/locales/pl-PL/modelProvider.json +5 -0
  59. package/locales/pt-BR/modelProvider.json +5 -0
  60. package/locales/ru-RU/modelProvider.json +5 -0
  61. package/locales/tr-TR/modelProvider.json +5 -0
  62. package/locales/vi-VN/modelProvider.json +5 -0
  63. package/locales/zh-CN/modelProvider.json +5 -0
  64. package/locales/zh-TW/modelProvider.json +5 -0
  65. package/package.json +2 -2
  66. package/src/app/api/chat/agentRuntime.ts +3 -2
  67. package/src/app/settings/llm/Google/index.tsx +1 -0
  68. package/src/config/modelProviders/openrouter.ts +64 -0
  69. package/src/config/server/provider.ts +2 -0
  70. package/src/const/layoutTokens.ts +1 -1
  71. package/src/libs/agent-runtime/google/index.ts +4 -2
  72. package/src/locales/default/modelProvider.ts +5 -0
@@ -1,8 +1,6 @@
1
1
  ---
2
2
  title: Configuring Azure OpenAI for LobeChat
3
- description: >-
4
- Learn how to configure Azure OpenAI for LobeChat, including usage limitations,
5
- interface configuration, and deployment settings.
3
+ description: Learn how to configure Azure OpenAI for LobeChat, including interface configuration and deployment settings.
6
4
  tags:
7
5
  - Azure OpenAI
8
6
  - LobeChat
@@ -13,15 +11,7 @@ tags:
13
11
 
14
12
  # Integrating with Azure OpenAI
15
13
 
16
- LobeChat supports using [Azure OpenAI][azure-openai-url] as the model service provider for OpenAI. This article will explain how to configure Azure OpenAI.
17
-
18
- ## Usage Limitations
19
-
20
- Due to development costs ([#178][rfc]), the current version of LobeChat does not fully comply with the implementation model of Azure OpenAI. Instead, it adopts a solution based on `openai` to be compatible with Azure OpenAI. As a result, the following limitations exist:
21
-
22
- - Only one of OpenAI and Azure OpenAI can be selected. Once you enable Azure OpenAI, you will not be able to use OpenAI as the model service provider.
23
- - LobeChat requires the deployment name to be the same as the model name in order to function properly. For example, the deployment name for the `gpt-35-turbo` model must be `gpt-35-turbo`. Otherwise, LobeChat will not be able to match the corresponding model correctly. <Image alt="Usage Limitations" src="https://github-production-user-asset-6210df.s3.amazonaws.com/28616219/267082091-d89d53d3-1c8c-40ca-ba15-0a9af2a79264.png" />
24
- - Due to the complexity of integrating with Azure OpenAI's SDK, it is currently not possible to query the list of configured models.
14
+ LobeChat supports using [Azure OpenAI](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models) as the model service provider for OpenAI. This article will explain how to configure Azure OpenAI.
25
15
 
26
16
  ## Configuring in the Interface
27
17
 
@@ -36,7 +26,7 @@ You can fill in the corresponding configuration items as needed:
36
26
 
37
27
  - **API Key**: The API key you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section.
38
28
  - **API Address**: Azure API address, which can be found in the "Keys and Endpoints" section when checking resources in the Azure portal.
39
- - **Azure API Version**: The API version of Azure, following the format YYYY-MM-DD. Refer to the [latest version][azure-api-verion-url].
29
+ - **Azure API Version**: The API version of Azure, following the format YYYY-MM-DD. Refer to the [latest version](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions).
40
30
 
41
31
  After completing the configuration of the above fields, click "Check". If it prompts "Check passed", it means the configuration was successful.
42
32
 
@@ -46,17 +36,7 @@ If you want the deployed version to be pre-configured with Azure OpenAI for end
46
36
 
47
37
  | Environment Variable | Type | Description | Default Value | Example |
48
38
  | --- | --- | --- | --- | --- |
49
- | `USE_AZURE_OPENAI` | Required | Set this value to `1` to enable Azure OpenAI configuration | - | `1` |
50
39
  | `AZURE_API_KEY` | Required | This is the API key you obtained from the Azure OpenAI account page | - | `c55168be3874490ef0565d9779ecd5a6` |
51
- | `OPENAI_PROXY_URL` | Required | Azure API address, can be found in the "Keys and Endpoints" section when checking resources in the Azure portal | - | `https://docs-test-001.openai.azure.com` |
52
- | `AZURE_API_VERSION` | Optional | Azure API version, following the format YYYY-MM-DD | 2023-08-01-preview | `2023-05-15`, see [latest version][azure-api-verion-url] |
53
- | `ACCESS_CODE` | Optional | Add a password to access this service. You can set a long password to prevent brute force attacks. When this value is separated by commas, it becomes an array of passwords | - | `awCT74` or `e3@09!` or `code1,code2,code3` |
54
-
55
- <Callout>
56
- When you enable `USE_AZURE_OPENAI` on the server, users will be unable to modify and use the
57
- OpenAI API key in the frontend configuration.
58
- </Callout>
59
-
60
- [azure-api-verion-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions
61
- [azure-openai-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models
62
- [rfc]: https://github.com/lobehub/lobe-chat/discussions/178
40
+ | `AZURE_ENDPOINT` | Required | Azure API address, can be found in the "Keys and Endpoints" section when checking resources in the Azure portal | - | `https://docs-test-001.openai.azure.com` |
41
+ | `AZURE_API_VERSION` | Optional | Azure API version, following the format YYYY-MM-DD | 2023-08-01-preview | `-`, see [latest version](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions) |
42
+ | `ACCESS_CODE` | Optional | Add a password to access LobeChat. You can set a long password to prevent brute force attacks. When this value is separated by commas, it becomes an array of passwords | - | `awCT74` or `e3@09!` or `code1,code2,code3` |
@@ -1,6 +1,6 @@
1
1
  ---
2
2
  title: 在 LobeChat 中集成 Azure OpenAI
3
- description: 了解如何在 LobeChat 中配置 Azure OpenAI 以及使用限制。从界面配置到部署时的环境变量设置,一步步指导。
3
+ description: 了解如何在 LobeChat 中配置 Azure OpenAI。一步步指导从界面配置到部署时的环境变量设置。
4
4
  tags:
5
5
  - Azure OpenAI
6
6
  - 配置指南
@@ -13,15 +13,7 @@ tags:
13
13
 
14
14
  # 与 Azure OpenAI 集成使用
15
15
 
16
- LobeChat 支持使用 [Azure OpenAI][azure-openai-url] 作为 OpenAI 的模型服务商,本文将介绍如何配置 Azure OpenAI。
17
-
18
- ## 使用限制
19
-
20
- 从研发成本考虑 ([#178][rfc]),目前阶段的 LobeChat 并没有 100% 完全符合 Azure OpenAI 的实现模型,采用了以 `openai` 为基座,兼容 Azure OpeAI 的解决方案。因此会带来以下局限性:
21
-
22
- - OpenAI 与 Azure OpenAI 只能二选一,当你开启使用 Azure OpenAI 后,将无法使用 OpenAI 作为模型服务商;
23
- - LobeChat 约定了与模型同名的部署名才能正常使用,比如 `gpt-35-turbo` 模型的部署名,必须为 `gpt-35-turbo`,否则 LobeChat 将无法正常正确匹配到相应模型 <Image alt="使用限制" src="https://github-production-user-asset-6210df.s3.amazonaws.com/28616219/267082091-d89d53d3-1c8c-40ca-ba15-0a9af2a79264.png" />
24
- - 由于 Azure OpenAI 的 SDK 接入复杂度,当前无法查询配置资源的模型列表;
16
+ LobeChat 支持使用 [Azure OpenAI](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models) 作为 OpenAI 的模型服务商,本文将介绍如何配置 Azure OpenAI。
25
17
 
26
18
  ## 在界面中配置
27
19
 
@@ -36,7 +28,7 @@ LobeChat 支持使用 [Azure OpenAI][azure-openai-url] 作为 OpenAI 的模型
36
28
 
37
29
  - **APIKey**:你在 Azure OpenAI 账户页面申请的 API 密钥,可在 “密钥和终结点” 部分中找到此值
38
30
  - **API 地址**:Azure API 地址,从 Azure 门户检查资源时,可在 “密钥和终结点” 部分中找到此值
39
- - **Azure Api Version**: Azure 的 API 版本,遵循 YYYY-MM-DD 格式,查阅[最新版本][azure-api-verion-url]
31
+ - **Azure Api Version**: Azure 的 API 版本,遵循 YYYY-MM-DD 格式,查阅[最新版本](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions)
40
32
 
41
33
  完成上述字段配置后,点击「检查」,如果提示「检查通过」,则说明配置成功。
42
34
 
@@ -48,16 +40,7 @@ LobeChat 支持使用 [Azure OpenAI][azure-openai-url] 作为 OpenAI 的模型
48
40
 
49
41
  | 环境变量 | 类型 | 描述 | 默认值 | 示例 |
50
42
  | --- | --- | --- | --- | --- |
51
- | `USE_AZURE_OPENAI` | 必选 | 设置改值为 `1` 开启 Azure OpenAI 配置 | - | `1` |
52
43
  | `AZURE_API_KEY` | 必选 | 这是你在 Azure OpenAI 账户页面申请的 API 密钥 | - | `c55168be3874490ef0565d9779ecd5a6` |
53
- | `OPENAI_PROXY_URL` | 必选 | Azure API 地址,从 Azure 门户检查资源时,可在 “密钥和终结点” 部分中找到此值 | - | `https://docs-test-001.openai.azure.com` |
54
- | `AZURE_API_VERSION` | 可选 | Azure 的 API 版本,遵循 YYYY-MM-DD 格式 | 2023-08-01-preview | `2023-05-15`,查阅[最新版本][azure-api-verion-url] |
55
- | `ACCESS_CODE` | 可选 | 添加访问此服务的密码,你可以设置一个长密码以防被爆破,该值用逗号分隔时为密码数组 | - | `awCT74` 或 `e3@09!` or `code1,code2,code3` |
56
-
57
- <Callout>
58
- 当你在服务端开启 `USE_AZURE_OPENAI` 后,用户将无法在前端配置中修改并使用 OpenAI API key。
59
- </Callout>
60
-
61
- [azure-api-verion-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions
62
- [azure-openai-url]: https://learn.microsoft.com/zh-cn/azure/ai-services/openai/concepts/models
63
- [rfc]: https://github.com/lobehub/lobe-chat/discussions/178
44
+ | `AZURE_ENDPOINT` | 必选 | Azure API 地址,从 Azure 门户检查资源时,可在 “密钥和终结点” 部分中找到此值 | - | `https://docs-test-001.openai.azure.com` |
45
+ | `AZURE_API_VERSION` | 可选 | Azure 的 API 版本,遵循 YYYY-MM-DD 格式 | 2023-08-01-preview | `-`,查阅[最新版本](https://learn.microsoft.com/zh-cn/azure/ai-services/openai/reference#chat-completions) |
46
+ | `ACCESS_CODE` | 可选 | 添加访问 LobeChat 的密码,你可以设置一个长密码以防被爆破,该值用逗号分隔时为密码数组 | - | `awCT74` 或 `e3@09!` or `code1,code2,code3` |
@@ -32,4 +32,15 @@ docker run -d -p 3210:3210 -e OLLAMA_PROXY_URL=http://host.docker.internal:11434
32
32
 
33
33
  Now, you can use LobeChat to converse with the local LLM.
34
34
 
35
- For more information on using Ollama in LobeChat, please refer to [Ollama Usage](/en/usage/providers/ollama).
35
+
36
+ For more information on using Ollama in LobeChat, please refer to [Ollama Usage](/docs/usage/providers/ollama).
37
+
38
+
39
+ ## Accessing Ollama from Non-Local Locations
40
+
41
+ When you first initiate Ollama, it is configured to allow access only from the local machine. To enable access from other domains and set up port listening, you will need to adjust the environment variables accordingly.
42
+ ```
43
+ set OLLAMA_ORIGINS=*
44
+ set OLLAMA_HOST=0.0.0.0:11434
45
+ ```
46
+ For further guidance on configuration, consult the [Ollama Official Documentation](https://ollama.com/docs/configuration).
@@ -30,4 +30,13 @@ docker run -d -p 3210:3210 -e OLLAMA_PROXY_URL=http://host.docker.internal:11434
30
30
 
31
31
  接下来,你就可以使用 LobeChat 与本地 LLM 对话了。
32
32
 
33
- 关于在 LobeChat 中使用 Ollama 的更多信息,请查阅 [Ollama 使用](/zh/usage/providers/ollama)。
33
+ 关于在 LobeChat 中使用 Ollama 的更多信息,请查阅 [Ollama 使用](/zh/docs/usage/providers/ollama)。
34
+
35
+ ## 非本地访问 Ollama
36
+
37
+ 由于 Ollama 默认参数在启动时仅设置了本地访问,所以跨域访问以及端口监听需要进行额外的环境变量设置
38
+ ```
39
+ set OLLAMA_ORIGINS=*
40
+ set OLLAMA_HOST=0.0.0.0:11434
41
+ ```
42
+ 详细配置方法可以参考 [Ollama 官方文档](https://ollama.com/docs/configuration)。
@@ -34,7 +34,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat
34
34
  ```fish
35
35
  $ apt install docker.io
36
36
  ```
37
-
37
+
38
38
  </Tab>
39
39
 
40
40
  <Tab>
@@ -43,7 +43,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat
43
43
  ```
44
44
 
45
45
  </Tab>
46
-
46
+
47
47
  </Tabs>
48
48
 
49
49
  ### Docker Command Deployment
@@ -64,7 +64,7 @@ Command explanation:
64
64
 
65
65
  - Replace `sk-xxxx` in the above command with your OpenAI API Key.
66
66
 
67
- - For the complete list of environment variables supported by LobeChat, please refer to the [Environment Variables](/zh/self-hosting/environment-ariable) section.
67
+ - For the complete list of environment variables supported by LobeChat, please refer to the [Environment Variables](/en/docs/self-hosting/environment-variables) section.
68
68
 
69
69
  <Callout type="tip">
70
70
  Since the official Docker image build takes about half an hour, if you see the "update available"
@@ -106,7 +106,7 @@ First, create a `lobe.env` configuration file with various environment variables
106
106
  OPENAI_API_KEY=sk-xxxx
107
107
  OPENAI_PROXY_URL=https://api-proxy.com/v1
108
108
  ACCESS_CODE=arthals2333
109
- CUSTOM_MODELS=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
109
+ OPENAI_MODEL_LIST=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
110
110
  ```
111
111
 
112
112
  Then, you can use the following script to automate the update:
@@ -33,7 +33,7 @@ tags:
33
33
  ```fish
34
34
  $ apt install docker.io
35
35
  ```
36
-
36
+
37
37
  </Tab>
38
38
 
39
39
  <Tab>
@@ -42,7 +42,7 @@ tags:
42
42
  ```
43
43
 
44
44
  </Tab>
45
-
45
+
46
46
  </Tabs>
47
47
 
48
48
  ### Docker 指令部署
@@ -63,7 +63,7 @@ $ docker run -d -p 3210:3210 \
63
63
  - 使用你的 OpenAI API Key 替换上述命令中的 `sk-xxxx`
64
64
 
65
65
  <Callout type={'tip'}>
66
- LobeChat 支持的完整环境变量列表请参考 [📘 环境变量](/zh/self-hosting/environment-variables) 部分
66
+ LobeChat 支持的完整环境变量列表请参考 [📘 环境变量](/zh/docs/self-hosting/environment-variables) 部分
67
67
  </Callout>
68
68
 
69
69
  <Callout>
@@ -104,7 +104,7 @@ $ docker run -d -p 3210:3210 \
104
104
  OPENAI_API_KEY=sk-xxxx
105
105
  OPENAI_PROXY_URL=https://api-proxy.com/v1
106
106
  ACCESS_CODE=arthals2333
107
- CUSTOM_MODELS=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
107
+ OPENAI_MODEL_LIST=-gpt-4,-gpt-4-32k,-gpt-3.5-turbo-16k,gpt-3.5-turbo-1106=gpt-3.5-turbo-16k,gpt-4-0125-preview=gpt-4-turbo,gpt-4-vision-preview=gpt-4-vision
108
108
  ```
109
109
 
110
110
  然后,你可以使用以下脚本来自动更新:
@@ -92,7 +92,7 @@ Taking OpenAI as an example, the environment variables you need to add are as fo
92
92
 
93
93
  <Callout type={'tip'}>
94
94
  For a complete list of environment variables supported by LobeChat, please refer to the [📘
95
- Environment Variables](/en/self-hosting/environment-variables)
95
+ Environment Variables](/en/docs/self-hosting/environment-variables)
96
96
  </Callout>
97
97
 
98
98
  After adding the variables, finally click "Deploy lobe-chat" to enter the deployment phase.
@@ -86,7 +86,7 @@ tags:
86
86
  | `OPENAI_PROXY_URL` | 可选 | 如果你手动配置了 OpenAI 接口代理,可以使用此配置项来覆盖默认的 OpenAI API 请求基础 URL | `https://aihubmix.com/v1` ,默认值:`https://api.openai.com/v1` |
87
87
 
88
88
  <Callout type={'tip'}>
89
- LobeChat 支持的完整环境变量列表请参考 [📘 环境变量](/zh/self-hosting/environment-variables) 部分
89
+ LobeChat 支持的完整环境变量列表请参考 [📘 环境变量](/zh/docs/self-hosting/environment-variables) 部分
90
90
  </Callout>
91
91
 
92
92
  添加完成后,最后点击「Deploy lobe-chat」 进入部署阶段。
@@ -41,7 +41,7 @@ Vercel's assigned domain DNS may be polluted in some regions, so binding a custo
41
41
  If you have deployed your project using the one-click deployment steps mentioned above, you may find that you are always prompted with "updates available." This is because Vercel creates a new project for you by default instead of forking this project, which causes the inability to accurately detect updates.
42
42
 
43
43
  <Callout>
44
- We recommend following the [Self-Hosting Upstream Sync](/zh/self-hosting/upstream-sync) steps to
44
+ We recommend following the [Self-Hosting Upstream Sync](/en/docs/self-hosting/upstream-sync) steps to
45
45
  redeploy.
46
46
  </Callout>
47
47
 
@@ -40,7 +40,7 @@ Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名
40
40
  如果你根据上述中的一键部署步骤部署了自己的项目,你可能会发现总是被提示 “有可用更新”。这是因为 Vercel 默认为你创建新项目而非 fork 本项目,这将导致无法准确检测更新。
41
41
 
42
42
  <Callout>
43
- 我们建议按照 [📘 LobeChat 自部署保持更新](/zh/self-hosting/advanced/upstream-sync) 步骤重新部署。
43
+ 我们建议按照 [📘 LobeChat 自部署保持更新](/zh/docs/self-hosting/advanced/upstream-sync) 步骤重新部署。
44
44
  </Callout>
45
45
 
46
46
  [deploy-button-image]: https://vercel.com/button
@@ -52,7 +52,7 @@ When you need to handle specific tasks, you need to consider creating a custom a
52
52
  If you want to understand Prompt writing tips and common model parameter settings, you can continue to view:
53
53
 
54
54
  <Cards>
55
- <Card href={'/en/usage/agents/prompt'} title={'Prompt User Guide'} />
55
+ <Card href={'/en/docs/usage/agents/prompt'} title={'Prompt User Guide'} />
56
56
 
57
- <Card href={'/en/usage/agents/model'} title={'Large Language Model User Guide'} />
57
+ <Card href={'/en/docs/usage/agents/model'} title={'Large Language Model User Guide'} />
58
58
  </Cards>
@@ -47,7 +47,7 @@ tags:
47
47
  如果你希望理解 Prompt 编写技巧和常见的模型参数设置,可以继续查看:
48
48
 
49
49
  <Cards>
50
- <Card href={'/zh/usage/agents/prompt'} title={'Prompt 使用指南'} />
50
+ <Card href={'/zh/docs/usage/agents/prompt'} title={'Prompt 使用指南'} />
51
51
 
52
- <Card href={'/zh/usage/agents/model'} title={'大语言模型使用指南'} />
52
+ <Card href={'/zh/docs/usage/agents/model'} title={'大语言模型使用指南'} />
53
53
  </Cards>
@@ -11,6 +11,7 @@ tags:
11
11
  - Google AI Gemini
12
12
  - ChatGLM
13
13
  - Moonshot AI
14
+ - Together AI
14
15
  - local model support
15
16
  - Ollama
16
17
  ---
@@ -35,9 +36,14 @@ In this way, LobeChat can more flexibly adapt to the needs of different users, w
35
36
  We have implemented support for the following model service providers:
36
37
 
37
38
  - **AWS Bedrock**: Integrated with AWS Bedrock service, supporting models such as **Claude / LLama2**, providing powerful natural language processing capabilities. [Learn more](https://aws.amazon.com/cn/bedrock)
39
+ - **Anthropic (Claude)**: Accessed Anthropic's **Claude** series models, including Claude 3 and Claude 2, with breakthroughs in multi-modal capabilities and extended context, setting a new industry benchmark. [Learn more](https://www.anthropic.com/claude)
38
40
  - **Google AI (Gemini Pro, Gemini Vision)**: Access to Google's **Gemini** series models, including Gemini and Gemini Pro, to support advanced language understanding and generation. [Learn more](https://deepmind.google/technologies/gemini/)
39
41
  - **ChatGLM**: Added the **ChatGLM** series models from Zhipuai (GLM-4/GLM-4-vision/GLM-3-turbo), providing users with another efficient conversation model choice. [Learn more](https://www.zhipuai.cn/)
40
42
  - **Moonshot AI (Dark Side of the Moon)**: Integrated with the Moonshot series models, an innovative AI startup from China, aiming to provide deeper conversation understanding. [Learn more](https://www.moonshot.cn/)
43
+ - **Groq**: Accessed Groq's AI models, efficiently processing message sequences and generating responses, capable of multi-turn dialogues and single-interaction tasks. [Learn more](https://groq.com/)
44
+ - **OpenRouter**: Supports routing of models including **Claude 3**, **Gemma**, **Mistral**, **Llama2** and **Cohere**, with intelligent routing optimization to improve usage efficiency, open and flexible. [Learn more](https://openrouter.ai/)
45
+ - **01.AI (Yi Model)**: Integrated the 01.AI models, with series of APIs featuring fast inference speed, which not only shortened the processing time, but also maintained excellent model performance. [Learn more](https://01.ai/)
46
+ - **Together.ai**: Over 100 leading open-source Chat, Language, Image, Code, and Embedding models are available through the Together Inference API. For these models you pay just for what you use. [Learn more](https://www.together.ai/)
41
47
 
42
48
  At the same time, we are also planning to support more model service providers, such as Replicate and Perplexity, to further enrich our service provider library. If you would like LobeChat to support your favorite service provider, feel free to join our [community discussion](https://github.com/lobehub/lobe-chat/discussions/1284).
43
49
 
@@ -49,4 +55,4 @@ At the same time, we are also planning to support more model service providers,
49
55
  src={'https://github.com/lobehub/lobe-chat/assets/28616219/ca9a21bc-ea6c-4c90-bf4a-fa53b4fb2b5c'}
50
56
  />
51
57
 
52
- To meet the specific needs of users, LobeChat also supports the use of local models based on [Ollama](https://ollama.ai), allowing users to flexibly use their own or third-party models. For more details, see [Local Model Support](/en/usage/features/local-llm).
58
+ To meet the specific needs of users, LobeChat also supports the use of local models based on [Ollama](https://ollama.ai), allowing users to flexibly use their own or third-party models. For more details, see [Local Model Support](/en/docs/usage/features/local-llm).
@@ -12,6 +12,7 @@ tags:
12
12
  - ChatGLM
13
13
  - Moonshot AI
14
14
  - 01 AI
15
+ - Together AI
15
16
  - Ollama
16
17
  ---
17
18
 
@@ -36,9 +37,13 @@ tags:
36
37
 
37
38
  - **AWS Bedrock**:集成了 AWS Bedrock 服务,支持了 **Claude / LLama2** 等模型,提供了强大的自然语言处理能力。[了解更多](https://aws.amazon.com/cn/bedrock)
38
39
  - **Google AI (Gemini Pro、Gemini Vision)**:接入了 Google 的 **Gemini** 系列模型,包括 Gemini 和 Gemini Pro,以支持更高级的语言理解和生成。[了解更多](https://deepmind.google/technologies/gemini/)
40
+ - **Anthropic (Claude)**:接入了 Anthropic 的 **Claude** 系列模型,包括 Claude 3 和 Claude 2,多模态突破,超长上下文,树立行业新基准。[了解更多](https://www.anthropic.com/claude)
39
41
  - **ChatGLM**:加入了智谱的 **ChatGLM** 系列模型(GLM-4/GLM-4-vision/GLM-3-turbo),为用户提供了另一种高效的会话模型选择。[了解更多](https://www.zhipuai.cn/)
40
42
  - **Moonshot AI (月之暗面)**:集成了 Moonshot 系列模型,这是一家来自中国的创新性 AI 创业公司,旨在提供更深层次的会话理解。[了解更多](https://www.moonshot.cn/)
41
- - **01 AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
43
+ - **Together.ai**:集成部署了数百种开源模型和向量模型,无需本地部署即可随时访问这些模型。[了解更多](https://www.together.ai/)
44
+ - **01.AI (零一万物)**:集成了零一万物模型,系列 API 具备较快的推理速度,这不仅缩短了处理时间,同时也保持了出色的模型效果。[了解更多](https://www.lingyiwanwu.com/)
45
+ - **Groq**:接入了 Groq 的 AI 模型,高效处理消息序列,生成回应,胜任多轮对话及单次交互任务。[了解更多](https://groq.com/)
46
+ - **OpenRouter**:其支持包括 **Claude 3**,**Gemma**,**Mistral**,**Llama2**和**Cohere**等模型路由,支持智能路由优化,提升使用效率,开放且灵活。[了解更多](https://openrouter.ai/)
42
47
 
43
48
  同时,我们也在计划支持更多的模型服务商,如 Replicate 和 Perplexity 等,以进一步丰富我们的服务商库。如果你希望让 LobeChat 支持你喜爱的服务商,欢迎加入我们的[社区讨论](https://github.com/lobehub/lobe-chat/discussions/1284)。
44
49
 
@@ -50,4 +55,4 @@ tags:
50
55
  src={'https://github.com/lobehub/lobe-chat/assets/28616219/ca9a21bc-ea6c-4c90-bf4a-fa53b4fb2b5c'}
51
56
  />
52
57
 
53
- 为了满足特定用户的需求,LobeChat 还基于 [Ollama](https://ollama.ai) 支持了本地模型的使用,让用户能够更灵活地使用自己的或第三方的模型,详见 [本地模型支持](/zh/usage/features/local-llm)。
58
+ 为了满足特定用户的需求,LobeChat 还基于 [Ollama](https://ollama.ai) 支持了本地模型的使用,让用户能够更灵活地使用自己的或第三方的模型,详见 [本地模型支持](/zh/docs/usage/features/local-llm)。
@@ -52,7 +52,7 @@ Learn more about [plugin usage](/en-US/usage/plugins/basic) by checking it out.
52
52
 
53
53
  <Callout>
54
54
  If you are interested in plugin development, please refer to our [📘 Plugin Development
55
- Guide](/en/usage/plugins/development) in the Wiki.
55
+ Guide](/en/docs/usage/plugins/development) in the Wiki.
56
56
  </Callout>
57
57
 
58
58
  - [lobe-chat-plugins][lobe-chat-plugins]: This is the plugin index for LobeChat. It retrieves the list of plugins from the index.json of this repository and displays them to the users.
@@ -27,7 +27,7 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地
27
27
 
28
28
  此外,这些插件不仅局限于新闻聚合,还可以扩展到其他实用的功能,如快速检索文档、生成图片、获取 Bilibili 、Steam 等各种平台数据,以及与其他各式各样的第三方服务交互。
29
29
 
30
- 通过查看 [插件使用](/zh/usage/plugins/basic) 了解更多。
30
+ 通过查看 [插件使用](/zh/docs/usage/plugins/basic) 了解更多。
31
31
 
32
32
  <Callout type={'tip'}>
33
33
  为了帮助开发者更好地参与到这个生态中来,我们在提供了全面的开发资源。这包括详尽的组件开发文档、功能齐全的软件开发工具包(SDK),以及样板示例,这些都是为了简化开发过程,降低开发者的入门门槛。
@@ -40,7 +40,7 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地
40
40
  ## 插件生态体系
41
41
 
42
42
  <Callout>
43
- 如果你对插件开发感兴趣,请在 Wiki 中查阅我们的 [📘 插件开发指南](/zh/usage/plugins/development)。
43
+ 如果你对插件开发感兴趣,请在 Wiki 中查阅我们的 [📘 插件开发指南](/zh/docs/usage/plugins/development)。
44
44
  </Callout>
45
45
 
46
46
  - [lobe-chat-plugins][lobe-chat-plugins]:这是 LobeChat 的插件索引。它从该仓库的 index.json 中获取插件列表并显示给用户。
@@ -34,4 +34,4 @@ If you want to try installing custom plugins on your own, you can use the follow
34
34
 
35
35
  ## Developing Custom Plugins
36
36
 
37
- If you wish to develop a LobeChat plugin on your own, feel free to refer to the [Plugin Development Guide](/en/usage/plugins/development) to expand the possibilities of your AI assistant!
37
+ If you wish to develop a LobeChat plugin on your own, feel free to refer to the [Plugin Development Guide](/en/docs/usage/plugins/development) to expand the possibilities of your AI assistant!
@@ -32,4 +32,4 @@ tags:
32
32
 
33
33
  ## 开发自定义插件
34
34
 
35
- 如果你希望自行开发一个 LobeChat 的插件,欢迎查阅 [插件开发指南](/zh/usage/plugins/development) 以扩展你的 AI 智能助手的可能性边界!
35
+ 如果你希望自行开发一个 LobeChat 的插件,欢迎查阅 [插件开发指南](/zh/docs/usage/plugins/development) 以扩展你的 AI 智能助手的可能性边界!
@@ -27,7 +27,7 @@ This document will guide you on how to use Google Gemma in LobeChat:
27
27
  <Steps>
28
28
  ### Install Ollama locally
29
29
 
30
- First, you need to install Ollama. For the installation process, please refer to the [Ollama usage documentation](/en/usage/providers/ollama).
30
+ First, you need to install Ollama. For the installation process, please refer to the [Ollama usage documentation](/en/docs/usage/providers/ollama).
31
31
 
32
32
  ### Pull Google Gemma model to local using Ollama
33
33
 
@@ -59,9 +59,9 @@ In the session page, open the model panel and then select the Gemma model.
59
59
 
60
60
  <Callout type={'info'}>
61
61
  If you do not see the Ollama provider in the model selection panel, please refer to [Integrating
62
- with Ollama](/en/self-hosting/examples/ollama) to learn how to enable the Ollama provider in
62
+ with Ollama](/en/docs/self-hosting/examples/ollama) to learn how to enable the Ollama provider in
63
63
  LobeChat.
64
-
64
+
65
65
  </Callout>
66
66
  </Steps>
67
67
 
@@ -27,7 +27,7 @@ tags:
27
27
  <Steps>
28
28
  ### 本地安装 Ollama
29
29
 
30
- 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/usage/providers/ollama)。
30
+ 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/docs/usage/providers/ollama)。
31
31
 
32
32
  ### 用 Ollama 拉取 Google Gemma 模型到本地
33
33
 
@@ -59,8 +59,8 @@ ollama pull gemma
59
59
 
60
60
  <Callout type={'info'}>
61
61
  如果你没有在模型选择面板中看到 Ollama 服务商,请查阅 [与 Ollama
62
- 集成](/zh/self-hosting/examples/ollama) 了解如何在 LobeChat 中开启 Ollama 服务商。
63
-
62
+ 集成](/zh/docs/self-hosting/examples/ollama) 了解如何在 LobeChat 中开启 Ollama 服务商。
63
+
64
64
  </Callout>
65
65
  </Steps>
66
66
 
@@ -26,7 +26,7 @@ Now, through the integration of LobeChat and [Ollama](https://ollama.com/), you
26
26
  <Steps>
27
27
  ## Local Installation of Ollama
28
28
 
29
- First, you need to install Ollama. For the installation process, please refer to the [Ollama Usage Document](/en/usage/providers/ollama).
29
+ First, you need to install Ollama. For the installation process, please refer to the [Ollama Usage Document](/en/docs/usage/providers/ollama).
30
30
 
31
31
  ## Pull the Qwen Model to Local with Ollama
32
32
 
@@ -61,8 +61,8 @@ In the LobeChat conversation page, open the model selection panel, and then sele
61
61
  />
62
62
 
63
63
  <Callout type={'info'}>
64
- If you do not see the Ollama provider in the model selection panel, please refer to [Integration with Ollama](/en/self-hosting/examples/ollama) to learn how to enable the Ollama provider in LobeChat.
65
-
64
+ If you do not see the Ollama provider in the model selection panel, please refer to [Integration with Ollama](/en/docs/self-hosting/examples/ollama) to learn how to enable the Ollama provider in LobeChat.
65
+
66
66
  </Callout>
67
67
  </Steps>
68
68
 
@@ -28,7 +28,7 @@ tags:
28
28
  <Steps>
29
29
  ### 本地安装 Ollama
30
30
 
31
- 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/usage/providers/ollama)。
31
+ 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/docs/usage/providers/ollama)。
32
32
 
33
33
  ### 用 Ollama 拉取 Qwen 模型到本地
34
34
 
@@ -58,8 +58,8 @@ ollama pull qwen:14b
58
58
 
59
59
  <Callout type={'info'}>
60
60
  如果你没有在模型选择面板中看到 Ollama 服务商,请查阅 [与 Ollama
61
- 集成](/zh/self-hosting/examples/ollama) 了解如何在 LobeChat 中开启 Ollama 服务商。
62
-
61
+ 集成](/zh/docs/self-hosting/examples/ollama) 了解如何在 LobeChat 中开启 Ollama 服务商。
62
+
63
63
  </Callout>
64
64
  </Steps>
65
65
 
@@ -52,7 +52,7 @@ First, you need to install Ollama, which supports macOS, Windows, and Linux syst
52
52
  ```
53
53
 
54
54
  </Tab>
55
-
55
+
56
56
  </Tabs>
57
57
 
58
58
  ### Pulling Models to Local with Ollama
@@ -76,9 +76,9 @@ Next, you can start conversing with the local LLM using LobeChat.
76
76
  />
77
77
 
78
78
  <Callout type={'info'}>
79
- You can visit [Integrating with Ollama](/en/self-hosting/examples/ollama) to learn how to deploy
79
+ You can visit [Integrating with Ollama](/en/docs/self-hosting/examples/ollama) to learn how to deploy
80
80
  LobeChat to meet the integration requirements with Ollama.
81
-
81
+
82
82
  </Callout>
83
83
  </Steps>
84
84
 
@@ -52,7 +52,7 @@ Ollama 是一款强大的本地运行大型语言模型(LLM)的框架,支
52
52
  ```
53
53
 
54
54
  </Tab>
55
-
55
+
56
56
  </Tabs>
57
57
 
58
58
  ### 用 Ollama 拉取模型到本地
@@ -76,8 +76,8 @@ Ollama 支持多种模型,你可以在 [Ollama Library](https://ollama.com/lib
76
76
  />
77
77
 
78
78
  <Callout type={'info'}>
79
- 你可以前往 [与 Ollama 集成](/zh/self-hosting/examples/ollama) 了解如何部署 LobeChat ,以满足与 Ollama 的集成需求。
80
-
79
+ 你可以前往 [与 Ollama 集成](/zh/docs/self-hosting/examples/ollama) 了解如何部署 LobeChat ,以满足与 Ollama 的集成需求。
80
+
81
81
  </Callout>
82
82
  </Steps>
83
83
 
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "بالإضافة إلى العنوان الافتراضي، يجب أن يتضمن http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "عنوان وكيل API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "أدخل مفتاح API الخاص بـ Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Изисква се адрес, включително http(s)://, освен ако не е по подразбиране",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Адрес на API прокси"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Въведете API Key, получен от Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "除默认地址外,必须包含 http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API 代理地址"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Geben Sie Ihren API-Key von Google ein",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Must include http(s):// besides the default address",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "API Proxy Address"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Enter the API Key from Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Aparte de la dirección predeterminada, debe incluir http(s)://",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Dirección del proxy de la API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Introduce la clave API proporcionada por Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Incluez http(s):// en plus de l'adresse par défaut",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Adresse du proxy API"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Saisissez la clé API de Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "Indirizzo dell'API Proxy"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Inserisci la chiave API da Google",
@@ -67,6 +67,11 @@
67
67
  }
68
68
  },
69
69
  "google": {
70
+ "endpoint": {
71
+ "desc": "デフォルトのアドレスに加えて、http(s)://を含める必要があります",
72
+ "placeholder": "https://generativelanguage.googleapis.com",
73
+ "title": "APIプロキシアドレス"
74
+ },
70
75
  "title": "Google",
71
76
  "token": {
72
77
  "desc": "Google の API Key を入力してください",