@lobehub/chat 1.68.2 → 1.68.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111) hide show
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/usage/providers/azureai.mdx +69 -0
  4. package/docs/usage/providers/azureai.zh-CN.mdx +69 -0
  5. package/docs/usage/providers/deepseek.mdx +3 -3
  6. package/docs/usage/providers/deepseek.zh-CN.mdx +5 -4
  7. package/docs/usage/providers/jina.mdx +51 -0
  8. package/docs/usage/providers/jina.zh-CN.mdx +51 -0
  9. package/docs/usage/providers/lmstudio.mdx +75 -0
  10. package/docs/usage/providers/lmstudio.zh-CN.mdx +75 -0
  11. package/docs/usage/providers/nvidia.mdx +55 -0
  12. package/docs/usage/providers/nvidia.zh-CN.mdx +55 -0
  13. package/docs/usage/providers/ppio.mdx +7 -7
  14. package/docs/usage/providers/ppio.zh-CN.mdx +6 -6
  15. package/docs/usage/providers/sambanova.mdx +50 -0
  16. package/docs/usage/providers/sambanova.zh-CN.mdx +50 -0
  17. package/docs/usage/providers/tencentcloud.mdx +49 -0
  18. package/docs/usage/providers/tencentcloud.zh-CN.mdx +49 -0
  19. package/docs/usage/providers/vertexai.mdx +59 -0
  20. package/docs/usage/providers/vertexai.zh-CN.mdx +59 -0
  21. package/docs/usage/providers/vllm.mdx +98 -0
  22. package/docs/usage/providers/vllm.zh-CN.mdx +98 -0
  23. package/docs/usage/providers/volcengine.mdx +47 -0
  24. package/docs/usage/providers/volcengine.zh-CN.mdx +48 -0
  25. package/locales/ar/chat.json +29 -0
  26. package/locales/ar/models.json +48 -0
  27. package/locales/ar/providers.json +3 -0
  28. package/locales/bg-BG/chat.json +29 -0
  29. package/locales/bg-BG/models.json +48 -0
  30. package/locales/bg-BG/providers.json +3 -0
  31. package/locales/de-DE/chat.json +29 -0
  32. package/locales/de-DE/models.json +48 -0
  33. package/locales/de-DE/providers.json +3 -0
  34. package/locales/en-US/chat.json +29 -0
  35. package/locales/en-US/models.json +48 -0
  36. package/locales/en-US/providers.json +3 -3
  37. package/locales/es-ES/chat.json +29 -0
  38. package/locales/es-ES/models.json +48 -0
  39. package/locales/es-ES/providers.json +3 -0
  40. package/locales/fa-IR/chat.json +29 -0
  41. package/locales/fa-IR/models.json +48 -0
  42. package/locales/fa-IR/providers.json +3 -0
  43. package/locales/fr-FR/chat.json +29 -0
  44. package/locales/fr-FR/models.json +48 -0
  45. package/locales/fr-FR/providers.json +3 -0
  46. package/locales/it-IT/chat.json +29 -0
  47. package/locales/it-IT/models.json +48 -0
  48. package/locales/it-IT/providers.json +3 -0
  49. package/locales/ja-JP/chat.json +29 -0
  50. package/locales/ja-JP/models.json +48 -0
  51. package/locales/ja-JP/providers.json +3 -0
  52. package/locales/ko-KR/chat.json +29 -0
  53. package/locales/ko-KR/models.json +48 -0
  54. package/locales/ko-KR/providers.json +3 -0
  55. package/locales/nl-NL/chat.json +29 -0
  56. package/locales/nl-NL/models.json +48 -0
  57. package/locales/nl-NL/providers.json +3 -0
  58. package/locales/pl-PL/chat.json +29 -0
  59. package/locales/pl-PL/models.json +48 -0
  60. package/locales/pl-PL/providers.json +3 -0
  61. package/locales/pt-BR/chat.json +29 -0
  62. package/locales/pt-BR/models.json +48 -0
  63. package/locales/pt-BR/providers.json +3 -0
  64. package/locales/ru-RU/chat.json +29 -0
  65. package/locales/ru-RU/models.json +48 -0
  66. package/locales/ru-RU/providers.json +3 -0
  67. package/locales/tr-TR/chat.json +29 -0
  68. package/locales/tr-TR/models.json +48 -0
  69. package/locales/tr-TR/providers.json +3 -0
  70. package/locales/vi-VN/chat.json +29 -0
  71. package/locales/vi-VN/models.json +48 -0
  72. package/locales/vi-VN/providers.json +3 -0
  73. package/locales/zh-CN/chat.json +29 -0
  74. package/locales/zh-CN/models.json +51 -3
  75. package/locales/zh-CN/providers.json +3 -4
  76. package/locales/zh-TW/chat.json +29 -0
  77. package/locales/zh-TW/models.json +48 -0
  78. package/locales/zh-TW/providers.json +3 -0
  79. package/package.json +1 -1
  80. package/packages/web-crawler/src/crawImpl/__test__/jina.test.ts +169 -0
  81. package/packages/web-crawler/src/crawImpl/jina.ts +1 -1
  82. package/packages/web-crawler/src/crawImpl/naive.ts +29 -3
  83. package/packages/web-crawler/src/urlRules.ts +7 -1
  84. package/packages/web-crawler/src/utils/errorType.ts +7 -0
  85. package/scripts/serverLauncher/startServer.js +11 -7
  86. package/src/config/modelProviders/ppio.ts +1 -1
  87. package/src/features/Conversation/Extras/Assistant.tsx +12 -20
  88. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +130 -0
  89. package/src/features/Conversation/Extras/Usage/UsageDetail/TokenProgress.tsx +71 -0
  90. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +146 -0
  91. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +94 -0
  92. package/src/features/Conversation/Extras/Usage/index.tsx +40 -0
  93. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +14 -0
  94. package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -0
  95. package/src/libs/agent-runtime/utils/streams/openai.test.ts +100 -10
  96. package/src/libs/agent-runtime/utils/streams/openai.ts +30 -4
  97. package/src/libs/agent-runtime/utils/streams/protocol.ts +4 -0
  98. package/src/locales/default/chat.ts +30 -1
  99. package/src/server/routers/tools/search.ts +1 -1
  100. package/src/store/aiInfra/slices/aiModel/initialState.ts +3 -1
  101. package/src/store/aiInfra/slices/aiModel/selectors.test.ts +1 -0
  102. package/src/store/aiInfra/slices/aiModel/selectors.ts +5 -0
  103. package/src/store/aiInfra/slices/aiProvider/action.ts +3 -1
  104. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +5 -1
  105. package/src/store/chat/slices/message/action.ts +3 -0
  106. package/src/store/global/initialState.ts +1 -0
  107. package/src/store/global/selectors/systemStatus.ts +2 -0
  108. package/src/types/message/base.ts +18 -0
  109. package/src/types/message/chat.ts +4 -3
  110. package/src/utils/fetch/fetchSSE.ts +24 -1
  111. package/src/utils/format.ts +3 -1
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.68.4](https://github.com/lobehub/lobe-chat/compare/v1.68.3...v1.68.4)
6
+
7
+ <sup>Released on **2025-03-04**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Support to show token usages.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Support to show token usages, closes [#6693](https://github.com/lobehub/lobe-chat/issues/6693) ([71a638e](https://github.com/lobehub/lobe-chat/commit/71a638e))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.68.3](https://github.com/lobehub/lobe-chat/compare/v1.68.2...v1.68.3)
31
+
32
+ <sup>Released on **2025-03-03**</sup>
33
+
34
+ #### 🐛 Bug Fixes
35
+
36
+ - **misc**: Improve url rules.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's fixed
44
+
45
+ - **misc**: Improve url rules, closes [#6669](https://github.com/lobehub/lobe-chat/issues/6669) ([5ee59e3](https://github.com/lobehub/lobe-chat/commit/5ee59e3))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ### [Version 1.68.2](https://github.com/lobehub/lobe-chat/compare/v1.68.1...v1.68.2)
6
56
 
7
57
  <sup>Released on **2025-03-03**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "improvements": [
5
+ "Support to show token usages."
6
+ ]
7
+ },
8
+ "date": "2025-03-04",
9
+ "version": "1.68.4"
10
+ },
11
+ {
12
+ "children": {
13
+ "fixes": [
14
+ "Improve url rules."
15
+ ]
16
+ },
17
+ "date": "2025-03-03",
18
+ "version": "1.68.3"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "improvements": [
@@ -0,0 +1,69 @@
1
+ ---
2
+ title: Using Azure AI API Key in LobeChat
3
+ description: Learn how to configure and use Azure AI models in LobeChat, get the API key, and start a conversation.
4
+ tags:
5
+ - LobeChat
6
+ - Azure AI
7
+ - API Key
8
+ - Web UI
9
+ ---
10
+
11
+ # Using Azure AI in LobeChat
12
+
13
+ <Image alt={'Using Azure AI in LobeChat'} cover src={'https://github.com/user-attachments/assets/81d0349a-44fe-4dfc-bbc4-8e9a1e09567d'} />
14
+
15
+ [Azure AI](https://azure.microsoft.com) is an open artificial intelligence technology platform based on the Microsoft Azure cloud platform. It provides various AI functionalities, including natural language processing, machine learning, and computer vision, helping businesses easily develop and deploy AI applications.
16
+
17
+ This document will guide you on how to integrate Azure AI models into LobeChat:
18
+
19
+ <Steps>
20
+ ### Step 1: Deploy Azure AI Project and Model
21
+
22
+ - First, visit [Azure AI Foundry](https://ai.azure.com/) and complete the registration and login process.
23
+ - After logging in, select `Browse models` on the homepage.
24
+
25
+ <Image alt={'Accessing Azure AI Foundry'} inStep src={'https://github.com/user-attachments/assets/1c6a3e42-8e24-4148-b2c3-0bfe60a8cf77'} />
26
+
27
+ - Choose the model you want in the model marketplace.
28
+ - Enter the model details and click the `Deploy` button.
29
+
30
+ <Image alt={'Browsing Models'} inStep src={'https://github.com/user-attachments/assets/3ed3226c-3d4c-49ef-b2c0-8953dac8a92e'} />
31
+
32
+ - In the pop-up dialog, create a new project.
33
+
34
+ <Image alt={'Creating a New Project'} inStep src={'https://github.com/user-attachments/assets/199b862a-5de4-4a54-83b2-f4dbf69be902'} />
35
+
36
+ <Callout type={'note'}>
37
+ For detailed configuration of Azure AI Foundry, please refer to the [official documentation](https://learn.microsoft.com/azure/ai-foundry/model-inference/).
38
+ </Callout>
39
+
40
+ ### Step 2: Obtain the Model's API Key and Endpoint
41
+
42
+ - In the details of the deployed model, you can find the Endpoint and API Key information.
43
+ - Copy and save the obtained information.
44
+
45
+ <Image alt={'Obtaining API Key'} inStep src={'https://github.com/user-attachments/assets/30c33426-412d-4dec-b096-317fe5880e79'} />
46
+
47
+ ### Step 3: Configure Azure AI in LobeChat
48
+
49
+ - Visit the `App Settings` and `AI Service Provider` interface in LobeChat.
50
+ - Find the settings for `Azure AI` in the list of providers.
51
+
52
+ <Image alt={'Entering Azure AI API Key'} inStep src={'https://github.com/user-attachments/assets/eb41f77f-ccdd-4a48-a8a2-7badac868c03'} />
53
+
54
+ - Enable the Azure AI service provider and fill in the obtained Endpoint and API Key.
55
+
56
+ <Callout type={'warning'}>
57
+ For the Endpoint, you only need to fill in the first part: `https://xxxxxx.services.ai.azure.com/models`.
58
+ </Callout>
59
+
60
+ - Choose an Azure AI model for your assistant and start the conversation.
61
+
62
+ <Image alt={'Selecting Azure AI Model'} inStep src={'https://github.com/user-attachments/assets/a1ba8ec0-e259-4da4-8980-0cf82ca5f52b'} />
63
+
64
+ <Callout type={'warning'}>
65
+ You may need to pay the API service provider for usage. Please refer to Azure AI's relevant pricing policies.
66
+ </Callout>
67
+ </Steps>
68
+
69
+ Now you can use the models provided by Azure AI in LobeChat for conversations.
@@ -0,0 +1,69 @@
1
+ ---
2
+ title: 在 LobeChat 中使用 Azure AI API Key
3
+ description: 学习如何在 LobeChat 中配置和使用 Azure AI 模型,获取 API 密钥并开始对话。
4
+ tags:
5
+ - LobeChat
6
+ - Azure AI
7
+ - API密钥
8
+ - Web UI
9
+ ---
10
+
11
+ # 在 LobeChat 中使用 Azure AI
12
+
13
+ <Image alt={'在 LobeChat 中使用 Azure AI '} cover src={'https://github.com/user-attachments/assets/81d0349a-44fe-4dfc-bbc4-8e9a1e09567d'} />
14
+
15
+ [Azure AI](https://azure.microsoft.com) 是一个基于 Microsoft Azure 云平台的开放式人工智能技术平台,提供包括自然语言处理、机器学习、计算机视觉等多种 AI 功能,帮助企业轻松开发和部署 AI 应用。
16
+
17
+ 本文档将指导你如何在 LobeChat 中接入 Azure AI 的模型:
18
+
19
+ <Steps>
20
+ ### 步骤一:部署 Azure AI 项目以及模型
21
+
22
+ - 首先,访问[Azure AI Foundry](https://ai.azure.com/)并完成注册登录
23
+ - 登录后在首页选择`浏览模型`
24
+
25
+ <Image alt={'进入 Azure AI Foundry'} inStep src={'https://github.com/user-attachments/assets/1c6a3e42-8e24-4148-b2c3-0bfe60a8cf77'} />
26
+
27
+ - 在模型广场中选择你想要的模型
28
+ - 进入模型详情,点击`部署`按钮
29
+
30
+ <Image alt={'浏览模型'} inStep src={'https://github.com/user-attachments/assets/3ed3226c-3d4c-49ef-b2c0-8953dac8a92e'} />
31
+
32
+ - 在弹出的对话框中创建一个新的项目
33
+
34
+ <Image alt={'创建新项目'} inStep src={'https://github.com/user-attachments/assets/199b862a-5de4-4a54-83b2-f4dbf69be902'} />
35
+
36
+ <Callout type={'note'}>
37
+ Azure AI Foundry 的详细配置请参考[官方文档](https://learn.microsoft.com/azure/ai-foundry/model-inference/)
38
+ </Callout>
39
+
40
+ ### 步骤二:获取模型的 API Key 及 Endpoint
41
+
42
+ - 在已部署的模型详情里,可以查询到 Endpoint 以及 API Key 信息
43
+ - 复制并保存好获取的信息
44
+
45
+ <Image alt={'获取 API Key'} inStep src={'https://github.com/user-attachments/assets/30c33426-412d-4dec-b096-317fe5880e79'} />
46
+
47
+ ### 步骤三:在 LobeChat 中配置 Azure AI
48
+
49
+ - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面
50
+ - 在供应商列表中找到 `Azure AI` 的设置项
51
+
52
+ <Image alt={'填写 Azure AI API 密钥'} inStep src={'https://github.com/user-attachments/assets/eb41f77f-ccdd-4a48-a8a2-7badac868c03'} />
53
+
54
+ - 打开 Azure AI 服务商并填入获取的 Endpoint 以及 API 密钥
55
+
56
+ <Callout type={'warning'}>
57
+ Endpoint 只需要填入前面部分 `https://xxxxxx.services.ai.azure.com/models` 即可
58
+ </Callout>
59
+
60
+ - 为你的助手选择一个 Azure AI 模型即可开始对话
61
+
62
+ <Image alt={'选择 Azure AI 模型'} inStep src={'https://github.com/user-attachments/assets/a1ba8ec0-e259-4da4-8980-0cf82ca5f52b'} />
63
+
64
+ <Callout type={'warning'}>
65
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考 Azure AI 的相关费用政策。
66
+ </Callout>
67
+ </Steps>
68
+
69
+ 至此你已经可以在 LobeChat 中使用 Azure AI 提供的模型进行对话了。
@@ -15,7 +15,7 @@ tags:
15
15
 
16
16
  <Image alt={'Using DeepSeek in LobeChat'} cover src={'https://github.com/lobehub/lobe-chat/assets/34400653/b4d12904-9d5d-46de-bd66-901eeb9c8e52'} />
17
17
 
18
- [DeepSeek](https://www.deepseek.com/) is an advanced open-source Large Language Model (LLM). The latest version, DeepSeek-V2, has made significant optimizations in architecture and performance, reducing training costs by 42.5% and inference costs by 93.3%.
18
+ [DeepSeek](https://www.deepseek.com/) represents a cutting-edge open-source large language model. The latest versions, DeepSeek-V3 and DeepSeek-R1, have undergone substantial improvements in both architecture and performance, particularly shining in their inference capabilities. By leveraging innovative training methodologies and reinforcement learning, the model has effectively boosted its inference prowess, now nearly matching the pinnacle performance of OpenAI.
19
19
 
20
20
  This document will guide you on how to use DeepSeek in LobeChat:
21
21
 
@@ -48,12 +48,12 @@ This document will guide you on how to use DeepSeek in LobeChat:
48
48
  - Access the `App Settings` interface in LobeChat.
49
49
  - Find the setting for `DeepSeek` under `Language Models`.
50
50
 
51
- <Image alt={'Enter Deepseek API Key'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/bae262d6-0d49-47f3-bc9c-356cf6f3f34e'} />
51
+ <Image alt={'Enter Deepseek API Key'} inStep src={'https://github.com/user-attachments/assets/aaa3e2c5-7f16-4cfb-86b6-2814a1aafe3a'} />
52
52
 
53
53
  - Open DeepSeek and enter the obtained API key.
54
54
  - Choose a DeepSeek model for your assistant to start the conversation.
55
55
 
56
- <Image alt={'Select Deepseek Model'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/84568505-6efe-4518-8888-682ccdd92197'} />
56
+ <Image alt={'Select Deepseek Model'} inStep src={'https://github.com/user-attachments/assets/84a5c971-1262-4639-b79f-c8b138530803'} />
57
57
 
58
58
  <Callout type={'warning'}>
59
59
  You may need to pay the API service provider during usage, please refer to DeepSeek's relevant
@@ -4,6 +4,7 @@ description: 学习如何在 LobeChat 中配置和使用 DeepSeek 语言模型
4
4
  tags:
5
5
  - LobeChat
6
6
  - DeepSeek
7
+ - DeepSeek R1
7
8
  - API密钥
8
9
  - Web UI
9
10
  ---
@@ -12,9 +13,9 @@ tags:
12
13
 
13
14
  <Image alt={'在 LobeChat 中使用 DeepSeek'} cover src={'https://github.com/lobehub/lobe-chat/assets/34400653/b4d12904-9d5d-46de-bd66-901eeb9c8e52'} />
14
15
 
15
- [DeepSeek](https://www.deepseek.com/) 是一款先进的开源大型语言模型(LLM)。最新版本 DeepSeek-V2 在架构和性能上进行了显著优化,同时训练成本降低了 42.5%, 推理成本降低了 93.3%。
16
+ [DeepSeek](https://www.deepseek.com/) 是一款先进的开源大型语言模型(LLM)。最新的 DeepSeek-V3 和 DeepSeek-R1 在架构和性能上进行了显著优化,特别是在推理能力方面表现出色。它通过创新性的训练方法和强化学习技术,成功地提升了模型的推理能力,并且其性能已逼近 OpenAI 的顶尖水平。
16
17
 
17
- 本文档将指导你如何在 LobeChat 中使用 DeepSeek:
18
+ 本文档将指导你如何在 LobeChat 中使用 DeepSeek
18
19
 
19
20
  <Steps>
20
21
  ### 步骤一:获取 DeepSeek API 密钥
@@ -44,12 +45,12 @@ tags:
44
45
  - 访问 LobeChat 的 `应用设置`界面
45
46
  - 在 `语言模型` 下找到 `DeepSeek` 的设置项
46
47
 
47
- <Image alt={'填写 Deepseek API 密钥'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/bae262d6-0d49-47f3-bc9c-356cf6f3f34e'} />
48
+ <Image alt={'填写 Deepseek API 密钥'} inStep src={'https://github.com/user-attachments/assets/aaa3e2c5-7f16-4cfb-86b6-2814a1aafe3a'} />
48
49
 
49
50
  - 打开 DeepSeek 并填入获取的 API 密钥
50
51
  - 为你的助手选择一个 DeepSeek 模型即可开始对话
51
52
 
52
- <Image alt={'选择 Deepseek 模型'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/84568505-6efe-4518-8888-682ccdd92197'} />
53
+ <Image alt={'选择 Deepseek 模型'} inStep src={'https://github.com/user-attachments/assets/84a5c971-1262-4639-b79f-c8b138530803'} />
53
54
 
54
55
  <Callout type={'warning'}>
55
56
  在使用过程中你可能需要向 API 服务提供商付费,请参考 DeepSeek 的相关费用政策。
@@ -0,0 +1,51 @@
1
+ ---
2
+ title: Using Jina AI API Key in LobeChat
3
+ description: Learn how to configure and use Jina AI models in LobeChat, obtain an API key, and start conversations.
4
+ tags:
5
+ - LobeChat
6
+ - Jina AI
7
+ - API Key
8
+ - Web UI
9
+ ---
10
+
11
+ # Using Jina AI in LobeChat
12
+
13
+ <Image alt={'Using Jina AI in LobeChat'} cover src={'https://github.com/user-attachments/assets/840442b1-bf56-4a5f-9700-b3608b16a8a5'} />
14
+
15
+ [Jina AI](https://jina.ai/) is an open-source neural search company founded in 2020. It focuses on using deep learning technology to process multimodal data, providing efficient information retrieval solutions and supporting search for various data types such as text, images, and videos.
16
+
17
+ This document will guide you on how to use Jina AI in LobeChat:
18
+
19
+ <Steps>
20
+ ### Step 1: Obtain a Jina AI API Key
21
+
22
+ - Visit the [Jina AI official website](https://jina.ai/) and click the `API` button on the homepage.
23
+
24
+ <Image alt={'Obtain a Jina AI API Key'} inStep src={'https://github.com/user-attachments/assets/5ea37821-4ea8-437c-a15e-3b182d10f19e'} />
25
+
26
+ - Find the API Key generated for you in the `API Key` menu below.
27
+ - Copy and save the generated API Key.
28
+
29
+ <Callout type={'info'}>
30
+ * Jina AI provides each user with 1M free API Tokens, and the API can be used without registration.
31
+ * If you need to manage the API Key or recharge the API, you need to register and log in to the [Jina AI Console](https://jina.ai/api-dashboard/).
32
+ </Callout>
33
+
34
+ ### Step 2: Configure Jina AI in LobeChat
35
+
36
+ - Visit LobeChat's `Application Settings` interface.
37
+ - Find the `Jina AI` setting under `Language Model`.
38
+
39
+ <Image alt={'Fill in Jina AI API Key'} inStep src={'https://github.com/user-attachments/assets/1077bee5-b379-4063-b7bd-23b98ec146e2'} />
40
+
41
+ - Enable Jina AI and fill in the obtained API Key.
42
+ - Select a Jina AI model for your assistant and start the conversation.
43
+
44
+ <Image alt={'Select Jina AI Model'} inStep src={'https://github.com/user-attachments/assets/be06e348-8d4c-440c-b59f-b71120f21335'} />
45
+
46
+ <Callout type={'warning'}>
47
+ You may need to pay the API service provider during use. Please refer to Jina AI's relevant fee policy.
48
+ </Callout>
49
+ </Steps>
50
+
51
+ Now you can use the models provided by Jina AI in LobeChat to have conversations.
@@ -0,0 +1,51 @@
1
+ ---
2
+ title: 在 LobeChat 中使用 Jina AI API Key
3
+ description: 学习如何在 LobeChat 中配置和使用 Jina AI 模型,获取 API 密钥并开始对话。
4
+ tags:
5
+ - LobeChat
6
+ - Jina AI
7
+ - API密钥
8
+ - Web UI
9
+ ---
10
+
11
+ # 在 LobeChat 中使用 Jina AI
12
+
13
+ <Image alt={'在 LobeChat 中使用 Jina AI'} cover src={'https://github.com/user-attachments/assets/840442b1-bf56-4a5f-9700-b3608b16a8a5'} />
14
+
15
+ [Jina AI](https://jina.ai/) 是一家成立于 2020 年的开源神经搜索公司,专注于利用深度学习技术处理多模态数据,提供高效的信息检索解决方案,支持文本、图像、视频等多种数据类型的搜索。
16
+
17
+ 本文档将指导你如何在 LobeChat 中使用 Jina AI:
18
+
19
+ <Steps>
20
+ ### 步骤一:获取 Jina AI API 密钥
21
+
22
+ - 访问 [Jina AI 官方网站](https://jina.ai/),点击首页的 `API` 按钮
23
+
24
+ <Image alt={'获取 Jina AI API 密钥'} inStep src={'https://github.com/user-attachments/assets/5ea37821-4ea8-437c-a15e-3b182d10f19e'} />
25
+
26
+ - 在下方的 `API Key` 菜单中找到系统为你生成的 API Key
27
+ - 复制并保存生成的 API Key
28
+
29
+ <Callout type={'info'}>
30
+ * Jina AI 会为每个用户提供 1M 免费的 API Token,无需注册即可使用 API
31
+ * 如果需要管理 API Key,或为 API 充值,你需要注册并登录 [Jina AI 控制台](https://jina.ai/api-dashboard/)
32
+ </Callout>
33
+
34
+ ### 步骤二:在 LobeChat 中配置 Jina AI
35
+
36
+ - 访问 LobeChat 的 `应用设置`界面
37
+ - 在 `语言模型` 下找到 `Jina AI` 的设置项
38
+
39
+ <Image alt={'填写 Jina AI API 密钥'} inStep src={'https://github.com/user-attachments/assets/1077bee5-b379-4063-b7bd-23b98ec146e2'} />
40
+
41
+ - 打开 Jina AI 并填入获取的 API 密钥
42
+ - 为你的助手选择一个 Jina AI 模型即可开始对话
43
+
44
+ <Image alt={'选择 Jina AI 模型'} inStep src={'https://github.com/user-attachments/assets/be06e348-8d4c-440c-b59f-b71120f21335'} />
45
+
46
+ <Callout type={'warning'}>
47
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考 Jina AI 的相关费用政策。
48
+ </Callout>
49
+ </Steps>
50
+
51
+ 至此你已经可以在 LobeChat 中使用 Jina AI 提供的模型进行对话了。
@@ -0,0 +1,75 @@
1
+ ---
2
+ title: Using LM Studio in LobeChat
3
+ description: Learn how to configure and use LM Studio, and run AI models for conversations in LobeChat through LM Studio.
4
+ tags:
5
+ - LobeChat
6
+ - LM Studio
7
+ - Open Source Model
8
+ - Web UI
9
+ ---
10
+
11
+ # Using LM Studio in LobeChat
12
+
13
+ <Image alt={'Using LM Studio in LobeChat'} cover src={'https://github.com/user-attachments/assets/cc1f6146-8063-4a4d-947a-7fd6b9133c0c'} />
14
+
15
+ [LM Studio](https://lmstudio.ai/) is a platform for testing and running large language models (LLMs), providing an intuitive and easy-to-use interface suitable for developers and AI enthusiasts. It supports deploying and running various open-source LLM models, such as Deepseek or Qwen, on local computers, enabling offline AI chatbot functionality, thereby protecting user privacy and providing greater flexibility.
16
+
17
+ This document will guide you on how to use LM Studio in LobeChat:
18
+
19
+ <Steps>
20
+ ### Step 1: Obtain and Install LM Studio
21
+
22
+ - Go to the [LM Studio official website](https://lmstudio.ai/)
23
+ - Choose your platform and download the installation package. LM Studio currently supports MacOS, Windows, and Linux platforms.
24
+ - Follow the prompts to complete the installation and run LM Studio.
25
+
26
+ <Image alt={'Install and run LM Studio'} inStep src={'https://github.com/user-attachments/assets/e887fa04-c553-45f1-917f-5c123ac9c68b'} />
27
+
28
+ ### Step 2: Search and Download Models
29
+
30
+ - Open the `Discover` menu on the left, search for and download the model you want to use.
31
+ - Find a suitable model (such as Deepseek R1) and click download.
32
+ - The download may take some time, please wait patiently for it to complete.
33
+
34
+ <Image alt={'Search and download models'} inStep src={'https://github.com/user-attachments/assets/f878355f-710b-452e-8606-0c75c47f29d2'} />
35
+
36
+ ### Step 3: Deploy and Run Models
37
+
38
+ - Select the downloaded model in the top model selection bar and load the model.
39
+ - Configure the model runtime parameters in the pop-up panel. Refer to the [LM Studio official documentation](https://lmstudio.ai/docs) for detailed parameter settings.
40
+
41
+ <Image alt={'Configure model runtime parameters'} inStep src={'https://github.com/user-attachments/assets/dba58ea6-7df8-4971-b6d4-b24d5f486ba7'} />
42
+
43
+ - Click the `Load Model` button and wait for the model to finish loading and running.
44
+ - Once the model is loaded, you can use it in the chat interface for conversations.
45
+
46
+ ### Step 4: Enable Local Service
47
+
48
+ - If you want to use the model through other programs, you need to start a local API service. Start the service through the `Developer` panel or the software menu. The LM Studio service starts on port `1234` on your local machine by default.
49
+
50
+ <Image alt={'Start local service'} inStep src={'https://github.com/user-attachments/assets/08ced88b-4968-46e8-b1da-0c04ddf5b743'} />
51
+
52
+ - After the local service is started, you also need to enable the `CORS (Cross-Origin Resource Sharing)` option in the service settings so that the model can be used in other programs.
53
+
54
+ <Image alt={'Enable CORS'} inStep src={'https://github.com/user-attachments/assets/8ce79bd6-f1a3-48bb-b3d0-5271c84801c2'} />
55
+
56
+ ### Step 5: Use LM Studio in LobeChat
57
+
58
+ - Visit the `AI Service Provider` interface in LobeChat's `Application Settings`.
59
+ - Find the settings for `LM Studio` in the list of providers.
60
+
61
+ <Image alt={'Fill in the LM Studio address'} inStep src={'https://github.com/user-attachments/assets/143ff392-97b5-427a-97a7-f2f577915728'} />
62
+
63
+ - Open the LM Studio service provider and fill in the API service address.
64
+
65
+ <Callout type={"warning"}>
66
+ If your LM Studio is running locally, make sure to turn on `Client Request Mode`.
67
+ </Callout>
68
+
69
+ - Add the model you are running in the model list below.
70
+ - Select an LM Studio model for your assistant to start the conversation.
71
+
72
+ <Image alt={'Select LM Studio model'} inStep src={'https://github.com/user-attachments/assets/bd399cef-283c-4706-bdc8-de9de662de41'} />
73
+ </Steps>
74
+
75
+ Now you can use the model running in LM Studio in LobeChat for conversations.
@@ -0,0 +1,75 @@
1
+ ---
2
+ title: 在 LobeChat 中使用 LM Studio
3
+ description: 学习如何配置和使用 LM Studio,并在 LobeChat 中 通过 LM Studio 运行 AI 模型进行对话。
4
+ tags:
5
+ - LobeChat
6
+ - LM Studio
7
+ - 开源模型
8
+ - Web UI
9
+ ---
10
+
11
+ # 在 LobeChat 中使用 LM Studio
12
+
13
+ <Image alt={'在 LobeChat 中使用 LM Studio'} cover src={'https://github.com/user-attachments/assets/cc1f6146-8063-4a4d-947a-7fd6b9133c0c'} />
14
+
15
+ [LM Studio](https://lmstudio.ai/) 是一个用于测试和运行大型语言模型(LLM)的平台,提供了直观易用的界面,适合开发人员和 AI 爱好者使用。它支持在本地电脑上部署和运行各种开源 LLM 模型,例如 Deepseek 或 Qwen,实现离线 AI 聊天机器人的功能,从而保护用户隐私并提供更大的灵活性。
16
+
17
+ 本文档将指导你如何在 LobeChat 中使用 LM Studio:
18
+
19
+ <Steps>
20
+ ### 步骤一:获取并安装 LM Studio
21
+
22
+ - 前往 [LM Studio 官网](https://lmstudio.ai/)
23
+ - 选择你的平台并下载安装包,LM Studio 目前支持 MacOS、Windows 和 Linux 平台
24
+ - 按照提示完成安装,运行 LM Studio
25
+
26
+ <Image alt={'安装并运行 LM Studio'} inStep src={'https://github.com/user-attachments/assets/e887fa04-c553-45f1-917f-5c123ac9c68b'} />
27
+
28
+ ### 步骤二:搜索并下载模型
29
+
30
+ - 打开左侧的 `Discover` 菜单,搜索并下载你想要使用的模型
31
+ - 找到合适的模型(如 Deepseek R1),点击下载
32
+ - 下载可能需要一些时间,耐心等待完成
33
+
34
+ <Image alt={'搜索并下载模型'} inStep src={'https://github.com/user-attachments/assets/f878355f-710b-452e-8606-0c75c47f29d2'} />
35
+
36
+ ### 步骤三:部署并运行模型
37
+
38
+ - 在顶部的模型选择栏中选择下载好的模型,并加载模型
39
+ - 在弹出的面板中配置模型运行参数,详细的参数设置请参考 [LM Studio 官方文档](https://lmstudio.ai/docs)
40
+
41
+ <Image alt={'配置模型运行参数'} inStep src={'https://github.com/user-attachments/assets/dba58ea6-7df8-4971-b6d4-b24d5f486ba7'} />
42
+
43
+ - 点击 `加载模型` 按钮,等待模型完成加载并运行
44
+ - 模型加载完成后,你可以在聊天界面中使用该模型进行对话
45
+
46
+ ### 步骤四:启用本地服务
47
+
48
+ - 如果你希望通过其它程序使用该模型,需要启动一个本地 API 服务,通过 `Developer` 面板或软件菜单启动服务,LM Studio 服务默认启动在本机的 `1234` 端口
49
+
50
+ <Image alt={'启动本地服务'} inStep src={'https://github.com/user-attachments/assets/08ced88b-4968-46e8-b1da-0c04ddf5b743'} />
51
+
52
+ - 本地服务启动后,你还需要在服务设置中开启 `CORS(跨域资源共享)`选项,这样才能在其它程序中使用该模型
53
+
54
+ <Image alt={'开启 CORS'} inStep src={'https://github.com/user-attachments/assets/8ce79bd6-f1a3-48bb-b3d0-5271c84801c2'} />
55
+
56
+ ### 步骤五:在 LobeChat 中使用 LM Studio
57
+
58
+ - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面
59
+ - 在供应商列表中找到 `LM Studio` 的设置项
60
+
61
+ <Image alt={'填写 LM Studio 的地址'} inStep src={'https://github.com/user-attachments/assets/143ff392-97b5-427a-97a7-f2f577915728'} />
62
+
63
+ - 打开 LM Studio 服务商并填入 API 服务地址
64
+
65
+ <Callout type={"warning"}>
66
+ 如果你的 LM Studio 运行在本地,请确保打开`客户端请求模式`
67
+ </Callout>
68
+
69
+ - 在下方的模型列表中添加你运行的模型
70
+ - 为你的助手选择一个 LM Studio 模型即可开始对话
71
+
72
+ <Image alt={'选择 LM Studio 模型'} inStep src={'https://github.com/user-attachments/assets/bd399cef-283c-4706-bdc8-de9de662de41'} />
73
+ </Steps>
74
+
75
+ 至此你已经可以在 LobeChat 中使用 LM Studio 运行的模型进行对话了。
@@ -0,0 +1,55 @@
1
+ ---
2
+ title: Using Nvidia NIM API Key in LobeChat
3
+ description: Learn how to configure and use Nvidia NIM AI models in LobeChat, obtain an API key, and start a conversation.
4
+ tags:
5
+ - LobeChat
6
+ - Nvidia NIM
7
+ - API Key
8
+ - Web UI
9
+ ---
10
+
11
+ # Using Nvidia NIM in LobeChat
12
+
13
+ <Image alt={'Using Nvidia NIM in LobeChat'} cover src={'https://github.com/user-attachments/assets/539349dd-2c16-4f42-b525-cca74e113541'} />
14
+
15
+ [NVIDIA NIM](https://developer.nvidia.com/nim) is part of NVIDIA AI Enterprise and is designed to accelerate the deployment of generative AI applications through microservices. It provides a set of easy-to-use inference microservices that can run on any cloud, data center, or workstation, supporting NVIDIA GPU acceleration.
16
+
17
+ This document will guide you on how to access and use AI models provided by Nvidia NIM in LobeChat:
18
+
19
+ <Steps>
20
+ ### Step 1: Obtain Nvidia NIM API Key
21
+
22
+ - First, visit the [Nvidia NIM console](https://build.nvidia.com/explore/discover) and complete the registration and login.
23
+ - On the `Models` page, select the model you need, such as Deepseek-R1.
24
+
25
+ <Image alt={'Select Model'} inStep src={'https://github.com/user-attachments/assets/b49ed0c1-d6bf-4f46-b9df-5f7c730afaa3'} />
26
+
27
+ - On the model details page, click "Build with this NIM".
28
+ - In the pop-up dialog, click the `Generate API Key` button.
29
+
30
+ <Image alt={'Get API Key'} inStep src={'https://github.com/user-attachments/assets/5321f987-2c64-4211-8549-bd30ca9b59b9'} />
31
+
32
+ - Copy and save the created API Key.
33
+
34
+ <Callout type={'warning'}>
35
+ Please store the key securely as it will only appear once. If you accidentally lose it, you will need to create a new key.
36
+ </Callout>
37
+
38
+ ### Step 2: Configure Nvidia NIM in LobeChat
39
+
40
+ - Visit the `Application Settings` -> `AI Service Provider` interface in LobeChat.
41
+ - Find the settings item for `Nvidia NIM` in the list of providers.
42
+
43
+ <Image alt={'Fill in the Nvidia NIM API Key'} inStep src={'https://github.com/user-attachments/assets/dfc45807-2ed6-43eb-af4c-47df66dfff7d'} />
44
+
45
+ - Enable the Nvidia NIM service provider and fill in the obtained API key.
46
+ - Select an Nvidia NIM model for your assistant and start the conversation.
47
+
48
+ <Image alt={'Select Nvidia NIM Model'} inStep src={'https://github.com/user-attachments/assets/cb4ba5fe-c223-4b9f-a662-de93e4a536d1'} />
49
+
50
+ <Callout type={'warning'}>
51
+ You may need to pay the API service provider during use, please refer to Nvidia NIM's related fee policies.
52
+ </Callout>
53
+ </Steps>
54
+
55
+ Now you can use the models provided by Nvidia NIM to have conversations in LobeChat.
@@ -0,0 +1,55 @@
1
+ ---
2
+ title: 在 LobeChat 中使用 Nvidia NIM API Key
3
+ description: 学习如何在 LobeChat 中配置和使用 Nvidia NIM AI 模型,获取 API 密钥并开始对话。
4
+ tags:
5
+ - LobeChat
6
+ - Nvidia NIM
7
+ - API密钥
8
+ - Web UI
9
+ ---
10
+
11
+ # 在 LobeChat 中使用 Nvidia NIM
12
+
13
+ <Image alt={'在 LobeChat 中使用 Nvidia NIM'} cover src={'https://github.com/user-attachments/assets/539349dd-2c16-4f42-b525-cca74e113541'} />
14
+
15
+ [NVIDIA NIM](https://developer.nvidia.com/nim) 是 NVIDIA AI Enterprise 的一部分,旨在通过微服务加速生成式 AI 应用的部署。它提供了一组易于使用的推理微服务,可以在任何云、数据中心或工作站上运行,支持 NVIDIA GPU 加速。
16
+
17
+ 本文档将指导你如何在 LobeChat 中接入并使用 Nvidia NIM 提供的 AI 模型:
18
+
19
+ <Steps>
20
+ ### 步骤一:获取 Nvidia NIM API 密钥
21
+
22
+ - 首先,访问[Nvidia NIM 控制台](https://build.nvidia.com/explore/discover)并完成注册登录
23
+ - 在 `Models` 页面选择你需要的模型,例如 Deepseek-R1
24
+
25
+ <Image alt={'选择模型'} inStep src={'https://github.com/user-attachments/assets/b49ed0c1-d6bf-4f46-b9df-5f7c730afaa3'} />
26
+
27
+ - 在模型详情页点击`使用此NIM构建`
28
+ - 在弹出的对话框中点击`生成 API Key` 按钮
29
+
30
+ <Image alt={'获取 API Key'} inStep src={'https://github.com/user-attachments/assets/5321f987-2c64-4211-8549-bd30ca9b59b9'} />
31
+
32
+ - 复制并保存创建好的 API Key
33
+
34
+ <Callout type={'warning'}>
35
+ 请安全地存储密钥,因为它只会出现一次。如果你意外丢失它,你将需要创建一个新密钥。
36
+ </Callout>
37
+
38
+ ### 步骤二:在 LobeChat 中配置 Nvidia NIM
39
+
40
+ - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面
41
+ - 在供应商列表中找到 `Nvidia NIM` 的设置项
42
+
43
+ <Image alt={'填写 Nvidia NIM API 密钥'} inStep src={'https://github.com/user-attachments/assets/dfc45807-2ed6-43eb-af4c-47df66dfff7d'} />
44
+
45
+ - 打开 Nvidia NIM 服务商并填入获取的 API 密钥
46
+ - 为你的助手选择一个 Nvidia NIM 模型即可开始对话
47
+
48
+ <Image alt={'选择 Nvidia NIM 模型'} inStep src={'https://github.com/user-attachments/assets/cb4ba5fe-c223-4b9f-a662-de93e4a536d1'} />
49
+
50
+ <Callout type={'warning'}>
51
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考 Nvidia NIM 的相关费用政策。
52
+ </Callout>
53
+ </Steps>
54
+
55
+ 至此你已经可以在 LobeChat 中使用 Nvidia NIM 提供的模型进行对话了。