@lobehub/chat 0.147.0 → 0.147.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. package/CHANGELOG.md +50 -0
  2. package/README.md +8 -8
  3. package/README.zh-CN.md +8 -8
  4. package/docs/self-hosting/advanced/analytics.mdx +1 -0
  5. package/docs/self-hosting/advanced/analytics.zh-CN.mdx +1 -0
  6. package/docs/self-hosting/advanced/authentication.mdx +7 -6
  7. package/docs/self-hosting/advanced/authentication.zh-CN.mdx +7 -6
  8. package/docs/self-hosting/advanced/sso-providers/auth0.mdx +58 -32
  9. package/docs/self-hosting/advanced/sso-providers/auth0.zh-CN.mdx +60 -32
  10. package/docs/self-hosting/advanced/sso-providers/authentik.mdx +33 -29
  11. package/docs/self-hosting/advanced/sso-providers/authentik.zh-CN.mdx +30 -27
  12. package/docs/self-hosting/advanced/sso-providers/github.mdx +49 -20
  13. package/docs/self-hosting/advanced/sso-providers/github.zh-CN.mdx +57 -31
  14. package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.mdx +53 -35
  15. package/docs/self-hosting/advanced/sso-providers/microsoft-entra-id.zh-CN.mdx +43 -31
  16. package/docs/self-hosting/advanced/sso-providers/zitadel.mdx +64 -34
  17. package/docs/self-hosting/advanced/sso-providers/zitadel.zh-CN.mdx +62 -36
  18. package/docs/self-hosting/advanced/upstream-sync.mdx +32 -23
  19. package/docs/self-hosting/advanced/upstream-sync.zh-CN.mdx +32 -23
  20. package/docs/self-hosting/environment-variables/analytics.mdx +1 -0
  21. package/docs/self-hosting/environment-variables/analytics.zh-CN.mdx +1 -0
  22. package/docs/self-hosting/environment-variables/auth.mdx +1 -0
  23. package/docs/self-hosting/environment-variables/auth.zh-CN.mdx +1 -0
  24. package/docs/self-hosting/environment-variables/basic.mdx +1 -0
  25. package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +1 -0
  26. package/docs/self-hosting/environment-variables/model-provider.mdx +1 -0
  27. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +1 -0
  28. package/docs/self-hosting/environment-variables.mdx +1 -0
  29. package/docs/self-hosting/environment-variables.zh-CN.mdx +1 -0
  30. package/docs/self-hosting/examples/azure-openai.mdx +12 -8
  31. package/docs/self-hosting/examples/azure-openai.zh-CN.mdx +12 -8
  32. package/docs/self-hosting/examples/ollama.mdx +1 -0
  33. package/docs/self-hosting/examples/ollama.zh-CN.mdx +1 -0
  34. package/docs/self-hosting/faq/no-v1-suffix.mdx +1 -0
  35. package/docs/self-hosting/faq/no-v1-suffix.zh-CN.mdx +1 -0
  36. package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.mdx +1 -0
  37. package/docs/self-hosting/faq/proxy-with-unable-to-verify-leaf-signature.zh-CN.mdx +1 -0
  38. package/docs/self-hosting/platform/docker-compose.mdx +80 -73
  39. package/docs/self-hosting/platform/docker-compose.zh-CN.mdx +79 -73
  40. package/docs/self-hosting/platform/docker.mdx +85 -85
  41. package/docs/self-hosting/platform/docker.zh-CN.mdx +84 -85
  42. package/docs/self-hosting/platform/netlify.mdx +91 -42
  43. package/docs/self-hosting/platform/netlify.zh-CN.mdx +88 -38
  44. package/docs/self-hosting/platform/railway.mdx +8 -6
  45. package/docs/self-hosting/platform/railway.zh-CN.mdx +8 -6
  46. package/docs/self-hosting/platform/repocloud.mdx +8 -6
  47. package/docs/self-hosting/platform/repocloud.zh-CN.mdx +8 -6
  48. package/docs/self-hosting/platform/sealos.mdx +8 -6
  49. package/docs/self-hosting/platform/sealos.zh-CN.mdx +8 -6
  50. package/docs/self-hosting/platform/vercel.mdx +9 -7
  51. package/docs/self-hosting/platform/vercel.zh-CN.mdx +9 -7
  52. package/docs/self-hosting/platform/zeabur.mdx +8 -6
  53. package/docs/self-hosting/platform/zeabur.zh-CN.mdx +8 -6
  54. package/docs/self-hosting/start.mdx +11 -1
  55. package/docs/self-hosting/start.zh-CN.mdx +2 -1
  56. package/docs/usage/agents/concepts.mdx +13 -2
  57. package/docs/usage/agents/concepts.zh-CN.mdx +13 -2
  58. package/docs/usage/agents/custom-agent.mdx +9 -2
  59. package/docs/usage/agents/custom-agent.zh-CN.mdx +8 -4
  60. package/docs/usage/agents/model.mdx +3 -3
  61. package/docs/usage/agents/model.zh-CN.mdx +6 -5
  62. package/docs/usage/agents/prompt.mdx +7 -5
  63. package/docs/usage/agents/prompt.zh-CN.mdx +7 -5
  64. package/docs/usage/agents/topics.mdx +9 -1
  65. package/docs/usage/agents/topics.zh-CN.mdx +9 -1
  66. package/docs/usage/features/agent-market.mdx +5 -5
  67. package/docs/usage/features/agent-market.zh-CN.mdx +1 -0
  68. package/docs/usage/features/local-llm.mdx +6 -1
  69. package/docs/usage/features/local-llm.zh-CN.mdx +6 -1
  70. package/docs/usage/features/mobile.mdx +5 -1
  71. package/docs/usage/features/mobile.zh-CN.mdx +6 -1
  72. package/docs/usage/features/more.mdx +1 -0
  73. package/docs/usage/features/more.zh-CN.mdx +1 -0
  74. package/docs/usage/features/multi-ai-providers.mdx +11 -2
  75. package/docs/usage/features/multi-ai-providers.zh-CN.mdx +12 -2
  76. package/docs/usage/features/plugin-system.mdx +8 -7
  77. package/docs/usage/features/plugin-system.zh-CN.mdx +9 -7
  78. package/docs/usage/features/pwa.mdx +10 -4
  79. package/docs/usage/features/pwa.zh-CN.mdx +11 -4
  80. package/docs/usage/features/text-to-image.zh-CN.mdx +1 -0
  81. package/docs/usage/features/theme.mdx +6 -1
  82. package/docs/usage/features/theme.zh-CN.mdx +7 -1
  83. package/docs/usage/features/tts.zh-CN.mdx +1 -0
  84. package/docs/usage/features/vision.zh-CN.mdx +1 -0
  85. package/docs/usage/plugins/basic-usage.mdx +31 -7
  86. package/docs/usage/plugins/basic-usage.zh-CN.mdx +31 -7
  87. package/docs/usage/plugins/custom-plugin.mdx +1 -0
  88. package/docs/usage/plugins/custom-plugin.zh-CN.mdx +1 -0
  89. package/docs/usage/plugins/development.mdx +79 -30
  90. package/docs/usage/plugins/development.zh-CN.mdx +80 -31
  91. package/docs/usage/plugins/store.mdx +11 -2
  92. package/docs/usage/plugins/store.zh-CN.mdx +11 -2
  93. package/docs/usage/providers/groq.mdx +32 -12
  94. package/docs/usage/providers/groq.zh-CN.mdx +30 -12
  95. package/docs/usage/providers/ollama/gemma.mdx +27 -11
  96. package/docs/usage/providers/ollama/gemma.zh-CN.mdx +28 -11
  97. package/docs/usage/providers/ollama/qwen.mdx +30 -16
  98. package/docs/usage/providers/ollama/qwen.zh-CN.mdx +25 -11
  99. package/docs/usage/providers/ollama.mdx +27 -14
  100. package/docs/usage/providers/ollama.zh-CN.mdx +27 -14
  101. package/package.json +1 -1
  102. package/src/app/settings/llm/Azure/index.tsx +1 -1
  103. package/src/app/settings/llm/components/ProviderConfig/index.tsx +1 -1
  104. package/src/app/settings/llm/components/ProviderModelList/CustomModelOption.tsx +1 -1
  105. package/src/app/settings/llm/components/ProviderModelList/ModelConfigModal.tsx +1 -1
  106. package/src/app/settings/llm/components/ProviderModelList/ModelFetcher.tsx +3 -3
  107. package/src/app/settings/llm/components/ProviderModelList/index.tsx +2 -2
  108. package/src/features/AgentSetting/AgentConfig/ModelSelect.tsx +1 -1
  109. package/src/features/Conversation/Error/APIKeyForm/ProviderApiKeyForm.tsx +2 -2
  110. package/src/features/ModelSwitchPanel/index.tsx +1 -1
  111. package/src/locales/resources.test.ts +49 -0
  112. package/src/locales/resources.ts +7 -20
  113. package/src/services/_auth.ts +5 -3
  114. package/src/services/_header.ts +3 -4
  115. package/src/services/chat.ts +1 -1
  116. package/src/services/ollama.ts +3 -2
  117. package/src/store/global/slices/settings/actions/llm.test.ts +1 -1
  118. package/src/store/global/slices/settings/actions/llm.ts +2 -2
  119. package/src/store/global/slices/settings/selectors/modelConfig.test.ts +35 -14
  120. package/src/store/global/slices/settings/selectors/modelConfig.ts +55 -118
@@ -14,12 +14,18 @@ tags:
14
14
 
15
15
  # 在 LobeChat 中使用 Groq
16
16
 
17
- <Image alt={'在 LobeChat 中使用 Groq'} cover src={'https://github.com/lobehub/lobe-chat/assets/34400653/d0d08d98-a8d2-4b97-97c0-24a4f01d7eac'} />
17
+ <Image
18
+ alt={'在 LobeChat 中使用 Groq'}
19
+ cover
20
+ src={'https://github.com/lobehub/lobe-chat/assets/34400653/d0d08d98-a8d2-4b97-97c0-24a4f01d7eac'}
21
+ />
18
22
 
19
23
  Groq 的 [LPU 推理引擎](https://wow.groq.com/news_press/groq-lpu-inference-engine-leads-in-first-independent-llm-benchmark/) 在最新的独立大语言模型(LLM)基准测试中表现卓越,以其惊人的速度和效率重新定义了 AI 解决方案的标准。通过 LobeChat 与 Groq Cloud 的集成,你现在可以轻松地利用 Groq 的技术,在 LobeChat 中加速大语言模型的运行。
20
24
 
21
25
  <Callout type={'info'}>
22
- Groq LPU 推理引擎在内部基准测试中连续达到每秒 300 个令牌的速度,据 ArtificialAnalysis.ai 的基准测试确认,Groq 在吞吐量(每秒 241 个令牌)和接收 100 个输出令牌的总时间(0.8 秒)方面优于其他提供商。
26
+ Groq LPU 推理引擎在内部基准测试中连续达到每秒 300 个令牌的速度,据 ArtificialAnalysis.ai
27
+ 的基准测试确认,Groq 在吞吐量(每秒 241 个令牌)和接收 100 个输出令牌的总时间(0.8
28
+ 秒)方面优于其他提供商。
23
29
  </Callout>
24
30
 
25
31
  本文档将指导你如何在 LobeChat 中使用 Groq:
@@ -27,26 +33,38 @@ Groq 的 [LPU 推理引擎](https://wow.groq.com/news_press/groq-lpu-inference-e
27
33
  <Steps>
28
34
  ### 获取 GroqCloud API Key
29
35
 
30
- 首先,你需要到 [GroqCloud Console](https://console.groq.com/) 中获取一个 API Key。
36
+ 首先,你需要到 [GroqCloud Console](https://console.groq.com/) 中获取一个 API Key。
31
37
 
32
- <Image alt={'获取 GroqCloud API Key'} height={274} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/6942287e-fbb1-4a10-a1ce-caaa6663da1e'} />
38
+ <Image
39
+ alt={'获取 GroqCloud API Key'}
40
+ height={274}
41
+ inStep
42
+ src={'https://github.com/lobehub/lobe-chat/assets/34400653/6942287e-fbb1-4a10-a1ce-caaa6663da1e'}
43
+ />
33
44
 
34
- 在控制台的 `API Keys` 菜单中创建一个 API Key。
45
+ 在控制台的 `API Keys` 菜单中创建一个 API Key。
35
46
 
36
- <Image alt={'保存 GroqCloud API Key'} height={274} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/eb57ca57-4f45-4409-91ce-9fa9c7c626d6'} />
47
+ <Image
48
+ alt={'保存 GroqCloud API Key'}
49
+ height={274}
50
+ inStep
51
+ src={'https://github.com/lobehub/lobe-chat/assets/34400653/eb57ca57-4f45-4409-91ce-9fa9c7c626d6'}
52
+ />
37
53
 
38
- <Callout type={'warning'}>
39
- 妥善保存弹窗中的 key,它只会出现一次,如果不小心丢失了,你需要重新创建一个 key。
40
-
54
+ <Callout type={'warning'}>
55
+ 妥善保存弹窗中的 key,它只会出现一次,如果不小心丢失了,你需要重新创建一个 key。
41
56
  </Callout>
42
57
 
43
- ### 在 LobeChat 中配置 Groq
58
+ ### 在 LobeChat 中配置 Groq
44
59
 
45
- 你可以在 `设置` -> `语言模型` 中找到 Groq 的配置选项,将刚才获取的 API Key 填入。
60
+ 你可以在 `设置` -> `语言模型` 中找到 Groq 的配置选项,将刚才获取的 API Key 填入。
46
61
 
47
62
  <Image alt={'Groq 服务商设置'} height={274} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/88948a3a-6681-4a8d-9734-a464e09e4957'} />
48
63
  </Steps>
49
64
 
50
65
  接下来,在助手的模型选项中,选中一个 Groq 支持的模型,就可以在 LobeChat 中体验 Groq 强大的性能了。
51
66
 
52
- <Video alt={'选择 Groq 模型'} src="https://github.com/lobehub/lobe-chat/assets/28616219/b6b8226b-183f-4249-8255-663a5e9f5af4" />
67
+ <Video
68
+ alt={'选择 Groq 模型'}
69
+ src="https://github.com/lobehub/lobe-chat/assets/28616219/b6b8226b-183f-4249-8255-663a5e9f5af4"
70
+ />
@@ -14,7 +14,11 @@ tags:
14
14
 
15
15
  # Using Google Gemma Model
16
16
 
17
- <Image alt={'Using Gemma in LobeChat'} cover src={'https://github.com/lobehub/lobe-chat/assets/28616219/e636cb41-5b7f-4949-a236-1cc1633bd223'} />
17
+ <Image
18
+ alt={'Using Gemma in LobeChat'}
19
+ cover
20
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/e636cb41-5b7f-4949-a236-1cc1633bd223'}
21
+ />
18
22
 
19
23
  [Gemma](https://blog.google/technology/developers/gemma-open-models/) is an open-source large language model (LLM) from Google, designed to provide a more general and flexible model for various natural language processing tasks. Now, with the integration of LobeChat and [Ollama](https://ollama.com/), you can easily use Google Gemma in LobeChat.
20
24
 
@@ -23,23 +27,35 @@ This document will guide you on how to use Google Gemma in LobeChat:
23
27
  <Steps>
24
28
  ### Install Ollama locally
25
29
 
26
- First, you need to install Ollama. For the installation process, please refer to the [Ollama usage documentation](/en/usage/providers/ollama).
30
+ First, you need to install Ollama. For the installation process, please refer to the [Ollama usage documentation](/en/usage/providers/ollama).
27
31
 
28
- ### Pull Google Gemma model to local using Ollama
32
+ ### Pull Google Gemma model to local using Ollama
29
33
 
30
- After installing Ollama, you can install the Google Gemma model using the following command, using the 7b model as an example:
34
+ After installing Ollama, you can install the Google Gemma model using the following command, using the 7b model as an example:
31
35
 
32
- ```bash
33
- ollama pull gemma
34
- ```
36
+ ```bash
37
+ ollama pull gemma
38
+ ```
35
39
 
36
- <Image alt={'Pulling Gemma model using Ollama'} height={473} inStep src={'https://github.com/lobehub/lobe-chat/assets/28616219/7049a811-a08b-45d3-8491-970f579c2ebd'} width={791} />
40
+ <Image
41
+ alt={'Pulling Gemma model using Ollama'}
42
+ height={473}
43
+ inStep
44
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/7049a811-a08b-45d3-8491-970f579c2ebd'}
45
+ width={791}
46
+ />
37
47
 
38
- ### Select Gemma model
48
+ ### Select Gemma model
39
49
 
40
- In the session page, open the model panel and then select the Gemma model.
50
+ In the session page, open the model panel and then select the Gemma model.
41
51
 
42
- <Image alt={'Selecting Gemma model in the model selection panel'} height={629} inStep src={'https://github.com/lobehub/lobe-chat/assets/28616219/c91d0c18-a21f-41f6-b5cc-94d29faeb797'} width={791} />
52
+ <Image
53
+ alt={'Selecting Gemma model in the model selection panel'}
54
+ height={629}
55
+ inStep
56
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/c91d0c18-a21f-41f6-b5cc-94d29faeb797'}
57
+ width={791}
58
+ />
43
59
 
44
60
  <Callout type={'info'}>
45
61
  If you do not see the Ollama provider in the model selection panel, please refer to [Integrating
@@ -13,7 +13,12 @@ tags:
13
13
 
14
14
  # 使用 Google Gemma 模型
15
15
 
16
- <Image alt={'在 LobeChat 中使用 Gemma'} cover rounded src={'https://github.com/lobehub/lobe-chat/assets/28616219/e636cb41-5b7f-4949-a236-1cc1633bd223'} />
16
+ <Image
17
+ alt={'在 LobeChat 中使用 Gemma'}
18
+ cover
19
+ rounded
20
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/e636cb41-5b7f-4949-a236-1cc1633bd223'}
21
+ />
17
22
 
18
23
  [Gemma](https://blog.google/technology/developers/gemma-open-models/) 是 Google 开源的一款大语言模型(LLM),旨在提供一个更加通用、灵活的模型用于各种自然语言处理任务。现在,通过 LobeChat 与 [Ollama](https://ollama.com/) 的集成,你可以轻松地在 LobeChat 中使用 Google Gemma。
19
24
 
@@ -22,23 +27,35 @@ tags:
22
27
  <Steps>
23
28
  ### 本地安装 Ollama
24
29
 
25
- 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/usage/providers/ollama)。
30
+ 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/usage/providers/ollama)。
26
31
 
27
- ### 用 Ollama 拉取 Google Gemma 模型到本地
32
+ ### 用 Ollama 拉取 Google Gemma 模型到本地
28
33
 
29
- 在安装完成 Ollama 后,你可以通过以下命令安装 Google Gemma 模型,以 7b 模型为例:
34
+ 在安装完成 Ollama 后,你可以通过以下命令安装 Google Gemma 模型,以 7b 模型为例:
30
35
 
31
- ```bash
32
- ollama pull gemma
33
- ```
36
+ ```bash
37
+ ollama pull gemma
38
+ ```
34
39
 
35
- <Image alt={'使用 Ollama 拉取 Gemma 模型'} height={473} inStep src={'https://github.com/lobehub/lobe-chat/assets/28616219/7049a811-a08b-45d3-8491-970f579c2ebd'} width={791} />
40
+ <Image
41
+ alt={'使用 Ollama 拉取 Gemma 模型'}
42
+ height={473}
43
+ inStep
44
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/7049a811-a08b-45d3-8491-970f579c2ebd'}
45
+ width={791}
46
+ />
36
47
 
37
- ### 选择 Gemma 模型
48
+ ### 选择 Gemma 模型
38
49
 
39
- 在会话页面中,选择模型面板打开,然后选择 Gemma 模型。
50
+ 在会话页面中,选择模型面板打开,然后选择 Gemma 模型。
40
51
 
41
- <Image alt={'模型选择面板中选择 Gemma 模型'} height={629} inStep src={'https://github.com/lobehub/lobe-chat/assets/28616219/69414c79-642e-4323-9641-bfa43a74fcc8'} width={791} />
52
+ <Image
53
+ alt={'模型选择面板中选择 Gemma 模型'}
54
+ height={629}
55
+ inStep
56
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/69414c79-642e-4323-9641-bfa43a74fcc8'}
57
+ width={791}
58
+ />
42
59
 
43
60
  <Callout type={'info'}>
44
61
  如果你没有在模型选择面板中看到 Ollama 服务商,请查阅 [与 Ollama
@@ -11,7 +11,11 @@ tags:
11
11
 
12
12
  # Using the Local Qwen Model
13
13
 
14
- <Image alt={'Using Qwen in LobeChat'} cover src={'https://github.com/lobehub/lobe-chat/assets/28616219/7a5fd01a-9fed-49c1-93a3-422269213f19'} />
14
+ <Image
15
+ alt={'Using Qwen in LobeChat'}
16
+ cover
17
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/7a5fd01a-9fed-49c1-93a3-422269213f19'}
18
+ />
15
19
 
16
20
  [Qwen](https://github.com/QwenLM/Qwen1.5) is a large language model (LLM) open-sourced by Alibaba Cloud. It is officially defined as a constantly evolving AI large model, and it achieves more accurate Chinese recognition capabilities through more training set content.
17
21
 
@@ -22,29 +26,39 @@ Now, through the integration of LobeChat and [Ollama](https://ollama.com/), you
22
26
  <Steps>
23
27
  ## Local Installation of Ollama
24
28
 
25
- First, you need to install Ollama. For the installation process, please refer to the [Ollama Usage Document](/en/usage/providers/ollama).
29
+ First, you need to install Ollama. For the installation process, please refer to the [Ollama Usage Document](/en/usage/providers/ollama).
26
30
 
27
- ## Pull the Qwen Model to Local with Ollama
31
+ ## Pull the Qwen Model to Local with Ollama
28
32
 
29
- After installing Ollama, you can install the Qwen model with the following command, taking the 14b model as an example:
33
+ After installing Ollama, you can install the Qwen model with the following command, taking the 14b model as an example:
30
34
 
31
- ```bash
32
- ollama pull qwen:14b
33
- ```
34
- <Callout type={'info'}>
35
- The local version of Qwen provides different model sizes to choose from. Please refer to the
36
- [Qwen's Ollama integration page](https://ollama.com/library/qwen) to understand how to choose the
37
- model size.
38
-
35
+ ```bash
36
+ ollama pull qwen:14b
37
+ ```
38
+
39
+ <Callout type={'info'}>
40
+ The local version of Qwen provides different model sizes to choose from. Please refer to the
41
+ [Qwen's Ollama integration page](https://ollama.com/library/qwen) to understand how to choose the
42
+ model size.
39
43
  </Callout>
40
44
 
41
- <Image alt={'Use Ollama Pull Qwen Model'} height={473} inStep src={'https://github.com/lobehub/lobe-chat/assets/1845053/fe34fdfe-c2e4-4d6a-84d7-4ebc61b2516a'} />
45
+ <Image
46
+ alt={'Use Ollama Pull Qwen Model'}
47
+ height={473}
48
+ inStep
49
+ src={'https://github.com/lobehub/lobe-chat/assets/1845053/fe34fdfe-c2e4-4d6a-84d7-4ebc61b2516a'}
50
+ />
42
51
 
43
- ### Select the Qwen Model
52
+ ### Select the Qwen Model
44
53
 
45
- In the LobeChat conversation page, open the model selection panel, and then select the Qwen model.
54
+ In the LobeChat conversation page, open the model selection panel, and then select the Qwen model.
46
55
 
47
- <Image alt={'Choose Qwen Model'} height={430} inStep src={'https://github.com/lobehub/lobe-chat/assets/28616219/e0608cca-f62f-414a-bc55-28a61ba21f14'} />
56
+ <Image
57
+ alt={'Choose Qwen Model'}
58
+ height={430}
59
+ inStep
60
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/e0608cca-f62f-414a-bc55-28a61ba21f14'}
61
+ />
48
62
 
49
63
  <Callout type={'info'}>
50
64
  If you do not see the Ollama provider in the model selection panel, please refer to [Integration with Ollama](/en/self-hosting/examples/ollama) to learn how to enable the Ollama provider in LobeChat.
@@ -11,7 +11,11 @@ tags:
11
11
 
12
12
  # 使用本地通义千问 Qwen 模型
13
13
 
14
- <Image alt={'在 LobeChat 中使用 Qwen'} cover src={'https://github.com/lobehub/lobe-chat/assets/28616219/7a5fd01a-9fed-49c1-93a3-422269213f19'} />
14
+ <Image
15
+ alt={'在 LobeChat 中使用 Qwen'}
16
+ cover
17
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/7a5fd01a-9fed-49c1-93a3-422269213f19'}
18
+ />
15
19
 
16
20
  [通义千问](https://github.com/QwenLM/Qwen1.5) 是阿里云开源的一款大语言模型(LLM),官方定义是一个不断进化的 AI 大模型,并通过更多的训练集内容达到更精准的中文识别能力。
17
21
 
@@ -24,23 +28,33 @@ tags:
24
28
  <Steps>
25
29
  ### 本地安装 Ollama
26
30
 
27
- 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/usage/providers/ollama)。
31
+ 首先,你需要安装 Ollama,安装过程请查阅 [Ollama 使用文件](/zh/usage/providers/ollama)。
28
32
 
29
- ### 用 Ollama 拉取 Qwen 模型到本地
33
+ ### 用 Ollama 拉取 Qwen 模型到本地
30
34
 
31
- 在安装完成 Ollama 后,你可以通过以下命令安装 Qwen 模型,以 14b 模型为例:
35
+ 在安装完成 Ollama 后,你可以通过以下命令安装 Qwen 模型,以 14b 模型为例:
32
36
 
33
- ```bash
34
- ollama pull qwen:14b
35
- ```
37
+ ```bash
38
+ ollama pull qwen:14b
39
+ ```
36
40
 
37
- <Image alt={'使用 Ollama 拉取 Qwen 模型'} height={473} inStep src={'https://github.com/lobehub/lobe-chat/assets/1845053/fe34fdfe-c2e4-4d6a-84d7-4ebc61b2516a'} />
41
+ <Image
42
+ alt={'使用 Ollama 拉取 Qwen 模型'}
43
+ height={473}
44
+ inStep
45
+ src={'https://github.com/lobehub/lobe-chat/assets/1845053/fe34fdfe-c2e4-4d6a-84d7-4ebc61b2516a'}
46
+ />
38
47
 
39
- ### 选择 Qwen 模型
48
+ ### 选择 Qwen 模型
40
49
 
41
- 在会话页面中,选择模型面板打开,然后选择 Qwen 模型。
50
+ 在会话页面中,选择模型面板打开,然后选择 Qwen 模型。
42
51
 
43
- <Image alt={'模型选择面板中选择 Qwen 模型'} height={430} inStep src={'https://github.com/lobehub/lobe-chat/assets/28616219/e0608cca-f62f-414a-bc55-28a61ba21f14'} />
52
+ <Image
53
+ alt={'模型选择面板中选择 Qwen 模型'}
54
+ height={430}
55
+ inStep
56
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/e0608cca-f62f-414a-bc55-28a61ba21f14'}
57
+ />
44
58
 
45
59
  <Callout type={'info'}>
46
60
  如果你没有在模型选择面板中看到 Ollama 服务商,请查阅 [与 Ollama
@@ -11,7 +11,12 @@ tags:
11
11
 
12
12
  # Using Ollama in LobeChat
13
13
 
14
- <Image alt={'Using Ollama in LobeChat'} borderless cover src={'https://github.com/lobehub/lobe-chat/assets/28616219/a2a091b8-ac45-4679-b5e0-21d711e17fef'} />
14
+ <Image
15
+ alt={'Using Ollama in LobeChat'}
16
+ borderless
17
+ cover
18
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/a2a091b8-ac45-4679-b5e0-21d711e17fef'}
19
+ />
15
20
 
16
21
  Ollama is a powerful framework for running large language models (LLMs) locally, supporting various language models including Llama 2, Mistral, and more. Now, LobeChat supports integration with Ollama, meaning you can easily use the language models provided by Ollama to enhance your application within LobeChat.
17
22
 
@@ -20,7 +25,7 @@ This document will guide you on how to use Ollama in LobeChat:
20
25
  <Steps>
21
26
  ### Local Installation of Ollama
22
27
 
23
- First, you need to install Ollama, which supports macOS, Windows, and Linux systems. Depending on your operating system, choose one of the following installation methods:
28
+ First, you need to install Ollama, which supports macOS, Windows, and Linux systems. Depending on your operating system, choose one of the following installation methods:
24
29
 
25
30
  <Tabs items={['macOS', 'Linux', 'Windows (Preview)', 'Docker']}>
26
31
  <Tab>[Download Ollama for macOS](https://ollama.com/download) and unzip it.</Tab>
@@ -34,7 +39,7 @@ This document will guide you on how to use Ollama in LobeChat:
34
39
  ````
35
40
 
36
41
  Alternatively, you can refer to the [Linux manual installation guide](https://github.com/jmorganca/ollama/blob/main/docs/linux.md).
37
-
42
+
38
43
  </Tab>
39
44
 
40
45
  <Tab>[Download Ollama for Windows](https://ollama.com/download) and install it.</Tab>
@@ -45,26 +50,30 @@ This document will guide you on how to use Ollama in LobeChat:
45
50
  ```bash
46
51
  docker pull ollama/ollama
47
52
  ```
48
-
53
+
49
54
  </Tab>
50
55
 
51
56
  </Tabs>
52
57
 
53
- ### Pulling Models to Local with Ollama
58
+ ### Pulling Models to Local with Ollama
54
59
 
55
- After installing Ollama, you can install models locally, for example, llama2:
60
+ After installing Ollama, you can install models locally, for example, llama2:
56
61
 
57
- ```bash
58
- ollama pull llama2
59
- ```
62
+ ```bash
63
+ ollama pull llama2
64
+ ```
60
65
 
61
- Ollama supports various models, and you can view the available model list in the [Ollama Library](https://ollama.com/library) and choose the appropriate model based on your needs.
66
+ Ollama supports various models, and you can view the available model list in the [Ollama Library](https://ollama.com/library) and choose the appropriate model based on your needs.
62
67
 
63
- ### Use LLM in LobeChat
68
+ ### Use LLM in LobeChat
64
69
 
65
- Next, you can start conversing with the local LLM using LobeChat.
70
+ Next, you can start conversing with the local LLM using LobeChat.
66
71
 
67
- <Video height={524} inStep src="https://github.com/lobehub/lobe-chat/assets/28616219/063788c8-9fef-4c6b-b837-96668ad6bc41" />
72
+ <Video
73
+ height={524}
74
+ inStep
75
+ src="https://github.com/lobehub/lobe-chat/assets/28616219/063788c8-9fef-4c6b-b837-96668ad6bc41"
76
+ />
68
77
 
69
78
  <Callout type={'info'}>
70
79
  You can visit [Integrating with Ollama](/en/self-hosting/examples/ollama) to learn how to deploy
@@ -77,4 +86,8 @@ This document will guide you on how to use Ollama in LobeChat:
77
86
 
78
87
  You can find Ollama's configuration options in `Settings` -> `Language Model`, where you can configure Ollama's proxy, model name, and more.
79
88
 
80
- <Image alt={'Ollama Service Provider Settings'} height={274} src={'https://github.com/lobehub/lobe-chat/assets/28616219/da0db930-78ce-4262-b648-2b9e43c565c3'} />
89
+ <Image
90
+ alt={'Ollama Service Provider Settings'}
91
+ height={274}
92
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/da0db930-78ce-4262-b648-2b9e43c565c3'}
93
+ />
@@ -12,7 +12,12 @@ tags:
12
12
 
13
13
  # 在 LobeChat 中使用 Ollama
14
14
 
15
- <Image alt={'在 LobeChat 中使用 Ollama'} borderless cover src={'https://github.com/lobehub/lobe-chat/assets/28616219/a2a091b8-ac45-4679-b5e0-21d711e17fef'} />
15
+ <Image
16
+ alt={'在 LobeChat 中使用 Ollama'}
17
+ borderless
18
+ cover
19
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/a2a091b8-ac45-4679-b5e0-21d711e17fef'}
20
+ />
16
21
 
17
22
  Ollama 是一款强大的本地运行大型语言模型(LLM)的框架,支持多种语言模型,包括 Llama 2, Mistral 等。现在,LobeChat 已经支持与 Ollama 的集成,这意味着你可以在 LobeChat 中轻松使用 Ollama 提供的语言模型来增强你的应用。
18
23
 
@@ -21,7 +26,7 @@ Ollama 是一款强大的本地运行大型语言模型(LLM)的框架,支
21
26
  <Steps>
22
27
  ### 本地安装 Ollama
23
28
 
24
- 首先,你需要安装 Ollama,Ollama 支持 macOS、Windows 和 Linux 系统。 根据你的操作系统,选择以下安装方式之一:
29
+ 首先,你需要安装 Ollama,Ollama 支持 macOS、Windows 和 Linux 系统。 根据你的操作系统,选择以下安装方式之一:
25
30
 
26
31
  <Tabs items={['macOS', 'Linux','Windows (预览版)','Docker']}>
27
32
  <Tab>[下载 Ollama for macOS](https://ollama.com/download) 并解压。</Tab>
@@ -34,7 +39,7 @@ Ollama 是一款强大的本地运行大型语言模型(LLM)的框架,支
34
39
  ```
35
40
 
36
41
  或者,你也可以参考 [Linux 手动安装指南](https://github.com/ollama/ollama/blob/main/docs/linux.md)。
37
-
42
+
38
43
  </Tab>
39
44
 
40
45
  <Tab>[下载 Ollama for Windows](https://ollama.com/download) 并安装。</Tab>
@@ -45,26 +50,30 @@ Ollama 是一款强大的本地运行大型语言模型(LLM)的框架,支
45
50
  ```bash
46
51
  docker pull ollama/ollama
47
52
  ```
48
-
53
+
49
54
  </Tab>
50
55
 
51
56
  </Tabs>
52
57
 
53
- ### 用 Ollama 拉取模型到本地
58
+ ### 用 Ollama 拉取模型到本地
54
59
 
55
- 在安装完成 Ollama 后,你可以通过以下命安装模型,以 llama2 为例:
60
+ 在安装完成 Ollama 后,你可以通过以下命安装模型,以 llama2 为例:
56
61
 
57
- ```bash
58
- ollama pull llama2
59
- ```
62
+ ```bash
63
+ ollama pull llama2
64
+ ```
60
65
 
61
- Ollama 支持多种模型,你可以在 [Ollama Library](https://ollama.com/library) 中查看可用的模型列表,并根据需求选择合适的模型。
66
+ Ollama 支持多种模型,你可以在 [Ollama Library](https://ollama.com/library) 中查看可用的模型列表,并根据需求选择合适的模型。
62
67
 
63
- ### 在 LobeChat 中使用本地模型
68
+ ### 在 LobeChat 中使用本地模型
64
69
 
65
- 接下来,你就可以使用 LobeChat 与本地 LLM 对话了。
70
+ 接下来,你就可以使用 LobeChat 与本地 LLM 对话了。
66
71
 
67
- <Video height={524} inStep src="https://github.com/lobehub/lobe-chat/assets/28616219/95828c11-0ae5-4dfa-84ed-854124e927a6" />
72
+ <Video
73
+ height={524}
74
+ inStep
75
+ src="https://github.com/lobehub/lobe-chat/assets/28616219/95828c11-0ae5-4dfa-84ed-854124e927a6"
76
+ />
68
77
 
69
78
  <Callout type={'info'}>
70
79
  你可以前往 [与 Ollama 集成](/zh/self-hosting/examples/ollama) 了解如何部署 LobeChat ,以满足与 Ollama 的集成需求。
@@ -76,4 +85,8 @@ Ollama 是一款强大的本地运行大型语言模型(LLM)的框架,支
76
85
 
77
86
  你可以在 `设置` -> `语言模型` 中找到 Ollama 的配置选项,你可以在这里配置 Ollama 的代理、模型名称等。
78
87
 
79
- <Image alt={'Ollama 服务商设置'} height={274} src={'https://github.com/lobehub/lobe-chat/assets/28616219/da0db930-78ce-4262-b648-2b9e43c565c3'} />
88
+ <Image
89
+ alt={'Ollama 服务商设置'}
90
+ height={274}
91
+ src={'https://github.com/lobehub/lobe-chat/assets/28616219/da0db930-78ce-4262-b648-2b9e43c565c3'}
92
+ />
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "0.147.0",
3
+ "version": "0.147.2",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -34,7 +34,7 @@ const AzureOpenAIProvider = memo(() => {
34
34
 
35
35
  // Get the first model card's deployment name as the check model
36
36
  const checkModel = useGlobalStore((s) => {
37
- const chatModelCards = modelConfigSelectors.providerModelCards(providerKey)(s);
37
+ const chatModelCards = modelConfigSelectors.getModelCardsByProviderId(providerKey)(s);
38
38
 
39
39
  if (chatModelCards.length > 0) {
40
40
  return chatModelCards[0].deploymentName;
@@ -54,7 +54,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
54
54
  const [toggleProviderEnabled, setSettings, enabled] = useGlobalStore((s) => [
55
55
  s.toggleProviderEnabled,
56
56
  s.setSettings,
57
- modelConfigSelectors.providerEnabled(provider)(s),
57
+ modelConfigSelectors.isProviderEnabled(provider)(s),
58
58
  ]);
59
59
 
60
60
  useSyncSettings(form);
@@ -29,7 +29,7 @@ const CustomModelOption = memo<CustomModelOptionProps>(({ id, provider }) => {
29
29
  s.removeEnabledModels,
30
30
  ]);
31
31
  const modelCard = useGlobalStore(
32
- modelConfigSelectors.getCustomModelCardById({ id, provider }),
32
+ modelConfigSelectors.getCustomModelCard({ id, provider }),
33
33
  isEqual,
34
34
  );
35
35
 
@@ -27,7 +27,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ showAzureDeployName, pro
27
27
  ]);
28
28
 
29
29
  const modelCard = useGlobalStore(
30
- modelConfigSelectors.getCustomModelCardById({ id, provider: editingProvider }),
30
+ modelConfigSelectors.getCustomModelCard({ id, provider: editingProvider }),
31
31
  isEqual,
32
32
  );
33
33
 
@@ -36,12 +36,12 @@ const ModelFetcher = memo<ModelFetcherProps>(({ provider }) => {
36
36
  s.useFetchProviderModelList,
37
37
  s.setModelProviderConfig,
38
38
  ]);
39
- const enabledAutoFetch = useGlobalStore(modelConfigSelectors.enabledAutoFetchModels(provider));
39
+ const enabledAutoFetch = useGlobalStore(modelConfigSelectors.isAutoFetchModelsEnabled(provider));
40
40
  const latestFetchTime = useGlobalStore(
41
- (s) => modelConfigSelectors.providerConfig(provider)(s)?.latestFetchTime,
41
+ (s) => modelConfigSelectors.getConfigByProviderId(provider)(s)?.latestFetchTime,
42
42
  );
43
43
  const totalModels = useGlobalStore(
44
- (s) => modelConfigSelectors.providerModelCards(provider)(s).length,
44
+ (s) => modelConfigSelectors.getModelCardsByProviderId(provider)(s).length,
45
45
  );
46
46
 
47
47
  const { mutate, isValidating } = useFetchProviderModelList(provider, enabledAutoFetch);
@@ -51,7 +51,7 @@ const ProviderModelListSelect = memo<CustomModelSelectProps>(
51
51
  ]);
52
52
 
53
53
  const chatModelCards = useGlobalStore(
54
- modelConfigSelectors.providerModelCards(provider),
54
+ modelConfigSelectors.getModelCardsByProviderId(provider),
55
55
  isEqual,
56
56
  );
57
57
 
@@ -60,7 +60,7 @@ const ProviderModelListSelect = memo<CustomModelSelectProps>(
60
60
  isEqual,
61
61
  );
62
62
  const enabledModels = useGlobalStore(
63
- modelConfigSelectors.providerEnableModels(provider),
63
+ modelConfigSelectors.getEnableModelsByProviderId(provider),
64
64
  isEqual,
65
65
  );
66
66
 
@@ -25,7 +25,7 @@ interface ModelOption {
25
25
 
26
26
  const ModelSelect = memo(() => {
27
27
  const [model, updateConfig] = useStore((s) => [s.config.model, s.setAgentConfig]);
28
- const enabledList = useGlobalStore(modelConfigSelectors.enabledModelProviderList, isEqual);
28
+ const enabledList = useGlobalStore(modelConfigSelectors.providerListForModelSelect, isEqual);
29
29
  const { styles } = useStyles();
30
30
 
31
31
  const options = useMemo<SelectProps['options']>(() => {
@@ -24,8 +24,8 @@ const ProviderApiKeyForm = memo<ProviderApiKeyFormProps>(
24
24
  const [showProxy, setShow] = useState(false);
25
25
 
26
26
  const [apiKey, proxyUrl, setConfig] = useGlobalStore((s) => [
27
- modelConfigSelectors.providerConfig(provider)(s)?.apiKey,
28
- modelConfigSelectors.providerConfig(provider)(s)?.endpoint,
27
+ modelConfigSelectors.getConfigByProviderId(provider)(s)?.apiKey,
28
+ modelConfigSelectors.getConfigByProviderId(provider)(s)?.endpoint,
29
29
  s.setModelProviderConfig,
30
30
  ]);
31
31
 
@@ -44,7 +44,7 @@ const ModelSwitchPanel = memo<PropsWithChildren>(({ children }) => {
44
44
  const updateAgentConfig = useSessionStore((s) => s.updateAgentConfig);
45
45
 
46
46
  const router = useRouter();
47
- const enabledList = useGlobalStore(modelConfigSelectors.enabledModelProviderList, isEqual);
47
+ const enabledList = useGlobalStore(modelConfigSelectors.providerListForModelSelect, isEqual);
48
48
 
49
49
  const items = useMemo(() => {
50
50
  const getModelItems = (provider: ModelProviderCard) => {
@@ -0,0 +1,49 @@
1
+ import { describe, expect, it } from 'vitest';
2
+
3
+ import { normalizeLocale } from './resources';
4
+
5
+ describe('normalizeLocale', () => {
6
+ it('should return "en-US" when locale is undefined', () => {
7
+ expect(normalizeLocale()).toBe('en-US');
8
+ });
9
+
10
+ it('should return "zh-CN" when locale is "zh-CN"', () => {
11
+ expect(normalizeLocale('zh-CN')).toBe('zh-CN');
12
+ });
13
+
14
+ it('should return "zh-CN" when locale is "zh"', () => {
15
+ expect(normalizeLocale('zh')).toBe('zh-CN');
16
+ });
17
+
18
+ it('should return "de-DE" when locale is "de"', () => {
19
+ expect(normalizeLocale('de')).toBe('de-DE');
20
+ });
21
+
22
+ it('should return "ru-RU" when locale is "ru"', () => {
23
+ expect(normalizeLocale('ru')).toBe('ru-RU');
24
+ });
25
+
26
+ it('should return "ar" when locale is "ar-EG"', () => {
27
+ expect(normalizeLocale('ar')).toBe('ar');
28
+ expect(normalizeLocale('ar-EG')).toBe('ar');
29
+ });
30
+
31
+ it('should return "en-US" when locale is "en"', () => {
32
+ expect(normalizeLocale('en')).toBe('en-US');
33
+ });
34
+
35
+ it('should return the input locale for other valid locales', () => {
36
+ expect(normalizeLocale('fr-FR')).toBe('fr-FR');
37
+ expect(normalizeLocale('ja-JP')).toBe('ja-JP');
38
+ expect(normalizeLocale('ko-KR')).toBe('ko-KR');
39
+ expect(normalizeLocale('pt-BR')).toBe('pt-BR');
40
+ expect(normalizeLocale('tr-TR')).toBe('tr-TR');
41
+ expect(normalizeLocale('vi-VN')).toBe('vi-VN');
42
+ expect(normalizeLocale('zh-TW')).toBe('zh-TW');
43
+ });
44
+
45
+ it('should return the input locale for unknown locales', () => {
46
+ expect(normalizeLocale('unknown')).toBe('en-US');
47
+ expect(normalizeLocale('fr')).toBe('fr-FR');
48
+ });
49
+ });