@lobehub/chat 1.67.2 → 1.68.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/.env.example +4 -0
  2. package/CHANGELOG.md +33 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/README.md +3 -2
  6. package/README.zh-CN.md +1 -1
  7. package/changelog/v1.json +12 -0
  8. package/docs/self-hosting/advanced/auth.mdx +6 -5
  9. package/docs/self-hosting/advanced/auth.zh-CN.mdx +6 -5
  10. package/docs/self-hosting/environment-variables/model-provider.mdx +16 -0
  11. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +16 -0
  12. package/docs/usage/providers/ppio.mdx +57 -0
  13. package/docs/usage/providers/ppio.zh-CN.mdx +55 -0
  14. package/locales/en-US/providers.json +3 -0
  15. package/locales/zh-CN/providers.json +4 -0
  16. package/package.json +5 -5
  17. package/packages/web-crawler/src/__test__/crawler.test.ts +176 -0
  18. package/packages/web-crawler/src/utils/appUrlRules.test.ts +76 -0
  19. package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  20. package/src/config/aiModels/index.ts +3 -0
  21. package/src/config/aiModels/ppio.ts +276 -0
  22. package/src/config/llm.ts +6 -0
  23. package/src/config/modelProviders/index.ts +4 -0
  24. package/src/config/modelProviders/ppio.ts +249 -0
  25. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  26. package/src/libs/agent-runtime/ppio/__snapshots__/index.test.ts.snap +26 -0
  27. package/src/libs/agent-runtime/ppio/fixtures/models.json +42 -0
  28. package/src/libs/agent-runtime/ppio/index.test.ts +264 -0
  29. package/src/libs/agent-runtime/ppio/index.ts +51 -0
  30. package/src/libs/agent-runtime/ppio/type.ts +12 -0
  31. package/src/libs/agent-runtime/types/type.ts +1 -0
  32. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +2 -2
  33. package/src/server/routers/tools/__test__/search.test.ts +146 -0
  34. package/src/store/chat/slices/builtinTool/actions/searXNG.test.ts +67 -0
  35. package/src/store/tool/slices/builtin/selectors.test.ts +12 -0
  36. package/src/store/tool/slices/builtin/selectors.ts +4 -1
  37. package/src/types/user/settings/keyVaults.ts +1 -0
package/.env.example CHANGED
@@ -127,6 +127,10 @@ OPENAI_API_KEY=sk-xxxxxxxxx
127
127
 
128
128
  # TENCENT_CLOUD_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
129
129
 
130
+ ### PPIO ####
131
+
132
+ # PPIO_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
133
+
130
134
  ########################################
131
135
  ############ Market Service ############
132
136
  ########################################
package/CHANGELOG.md CHANGED
@@ -2,6 +2,39 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.68.0](https://github.com/lobehub/lobe-chat/compare/v1.67.2...v1.68.0)
6
+
7
+ <sup>Released on **2025-03-03**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Add new model provider PPIO.
12
+
13
+ #### 🐛 Bug Fixes
14
+
15
+ - **misc**: Fix search web-browsing display bug.
16
+
17
+ <br/>
18
+
19
+ <details>
20
+ <summary><kbd>Improvements and Fixes</kbd></summary>
21
+
22
+ #### What's improved
23
+
24
+ - **misc**: Add new model provider PPIO, closes [#6133](https://github.com/lobehub/lobe-chat/issues/6133) ([23a3fda](https://github.com/lobehub/lobe-chat/commit/23a3fda))
25
+
26
+ #### What's fixed
27
+
28
+ - **misc**: Fix search web-browsing display bug, closes [#6653](https://github.com/lobehub/lobe-chat/issues/6653) ([f472643](https://github.com/lobehub/lobe-chat/commit/f472643))
29
+
30
+ </details>
31
+
32
+ <div align="right">
33
+
34
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
35
+
36
+ </div>
37
+
5
38
  ### [Version 1.67.2](https://github.com/lobehub/lobe-chat/compare/v1.67.1...v1.67.2)
6
39
 
7
40
  <sup>Released on **2025-03-02**</sup>
package/Dockerfile CHANGED
@@ -197,6 +197,8 @@ ENV \
197
197
  OPENROUTER_API_KEY="" OPENROUTER_MODEL_LIST="" \
198
198
  # Perplexity
199
199
  PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
200
+ # PPIO
201
+ PPIO_API_KEY="" PPIO_MODEL_LIST="" \
200
202
  # Qwen
201
203
  QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
202
204
  # SambaNova
@@ -240,6 +240,8 @@ ENV \
240
240
  OPENROUTER_API_KEY="" OPENROUTER_MODEL_LIST="" \
241
241
  # Perplexity
242
242
  PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
243
+ # PPIO
244
+ PPIO_API_KEY="" PPIO_MODEL_LIST="" \
243
245
  # Qwen
244
246
  QWEN_API_KEY="" QWEN_MODEL_LIST="" QWEN_PROXY_URL="" \
245
247
  # SambaNova
package/README.md CHANGED
@@ -198,6 +198,7 @@ We have implemented support for the following model service providers:
198
198
 
199
199
  <details><summary><kbd>See more providers (+26)</kbd></summary>
200
200
 
201
+ - **[PPIO](https://lobechat.com/discover/provider/ppio)**: PPIO supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc. [Learn more](https://ppinfra.com/llm-api?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link)
201
202
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI is a platform providing a variety of large language models and AI image generation API services, flexible, reliable, and cost-effective. It supports the latest open-source models like Llama3 and Mistral, offering a comprehensive, user-friendly, and auto-scaling API solution for generative AI application development, suitable for the rapid growth of AI startups.
202
203
  - **[Together AI](https://lobechat.com/discover/provider/togetherai)**: Together AI is dedicated to achieving leading performance through innovative AI models, offering extensive customization capabilities, including rapid scaling support and intuitive deployment processes to meet various enterprise needs.
203
204
  - **[Fireworks AI](https://lobechat.com/discover/provider/fireworksai)**: Fireworks AI is a leading provider of advanced language model services, focusing on functional calling and multimodal processing. Its latest model, Firefunction V2, is based on Llama-3, optimized for function calling, conversation, and instruction following. The visual language model FireLLaVA-13B supports mixed input of images and text. Other notable models include the Llama series and Mixtral series, providing efficient multilingual instruction following and generation support.
@@ -668,7 +669,7 @@ If you would like to learn more details, please feel free to look at our [📘 D
668
669
 
669
670
  ## 🤝 Contributing
670
671
 
671
- Contributions of all types are more than welcome; if you are interested in contributing code, feel free to check out our GitHub [Issues][github-issues-link] and [Projects][github-project-link] to get stuck in to show us what youre made of.
672
+ Contributions of all types are more than welcome; if you are interested in contributing code, feel free to check out our GitHub [Issues][github-issues-link] and [Projects][github-project-link] to get stuck in to show us what you're made of.
672
673
 
673
674
  > \[!TIP]
674
675
  >
@@ -889,7 +890,7 @@ This project is [Apache 2.0](./LICENSE) licensed.
889
890
  [profile-link]: https://github.com/lobehub
890
891
  [share-linkedin-link]: https://linkedin.com/feed
891
892
  [share-linkedin-shield]: https://img.shields.io/badge/-share%20on%20linkedin-black?labelColor=black&logo=linkedin&logoColor=white&style=flat-square
892
- [share-mastodon-link]: https://mastodon.social/share?text=Check%20this%20GitHub%20repository%20out%20%F0%9F%A4%AF%20LobeChat%20-%20An%20open-source,%20extensible%20(Function%20Calling),%20high-performance%20chatbot%20framework.%20It%20supports%20one-click%20free%20deployment%20of%20your%20private%20ChatGPT/LLM%20web%20application.%20https://github.com/lobehub/lobe-chat%20#chatbot%20#chatGPT%20#openAI
893
+ [share-mastodon-link]: https://mastodon.social/share?text=Check%20this%20GitHub%20repository%20out%20%F0%9F%A4%AF%20LobeChat%20-%20An%20open-source,%20extensible%20%28Function%20Calling%29,%20high-performance%20chatbot%20framework.%20It%20supports%20one-click%20free%20deployment%20of%20your%20private%20ChatGPT%2FLLM%20web%20application.%20https://github.com/lobehub/lobe-chat%20#chatbot%20#chatGPT%20#openAI
893
894
  [share-mastodon-shield]: https://img.shields.io/badge/-share%20on%20mastodon-black?labelColor=black&logo=mastodon&logoColor=white&style=flat-square
894
895
  [share-reddit-link]: https://www.reddit.com/submit?title=Check%20this%20GitHub%20repository%20out%20%F0%9F%A4%AF%20LobeChat%20-%20An%20open-source%2C%20extensible%20%28Function%20Calling%29%2C%20high-performance%20chatbot%20framework.%20It%20supports%20one-click%20free%20deployment%20of%20your%20private%20ChatGPT%2FLLM%20web%20application.%20%23chatbot%20%23chatGPT%20%23openAI&url=https%3A%2F%2Fgithub.com%2Flobehub%2Flobe-chat
895
896
  [share-reddit-shield]: https://img.shields.io/badge/-share%20on%20reddit-black?labelColor=black&logo=reddit&logoColor=white&style=flat-square
package/README.zh-CN.md CHANGED
@@ -197,7 +197,7 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
197
197
  - **[GitHub](https://lobechat.com/discover/provider/github)**: 通过 GitHub 模型,开发人员可以成为 AI 工程师,并使用行业领先的 AI 模型进行构建。
198
198
 
199
199
  <details><summary><kbd>See more providers (+26)</kbd></summary>
200
-
200
+ - **[PPIO](https://lobechat.com/discover/provider/ppio)**: PPIO 派欧云提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。[了解更多](https://ppinfra.com/llm-api?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link)
201
201
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI 是一个提供多种大语言模型与 AI 图像生成的 API 服务的平台,灵活、可靠且具有成本效益。它支持 Llama3、Mistral 等最新的开源模型,并为生成式 AI 应用开发提供了全面、用户友好且自动扩展的 API 解决方案,适合 AI 初创公司的快速发展。
202
202
  - **[Together AI](https://lobechat.com/discover/provider/togetherai)**: Together AI 致力于通过创新的 AI 模型实现领先的性能,提供广泛的自定义能力,包括快速扩展支持和直观的部署流程,满足企业的各种需求。
203
203
  - **[Fireworks AI](https://lobechat.com/discover/provider/fireworksai)**: Fireworks AI 是一家领先的高级语言模型服务商,专注于功能调用和多模态处理。其最新模型 Firefunction V2 基于 Llama-3,优化用于函数调用、对话及指令跟随。视觉语言模型 FireLLaVA-13B 支持图像和文本混合输入。其他 notable 模型包括 Llama 系列和 Mixtral 系列,提供高效的多语言指令跟随与生成支持。
package/changelog/v1.json CHANGED
@@ -1,4 +1,16 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Add new model provider PPIO."
6
+ ],
7
+ "fixes": [
8
+ "Fix search web-browsing display bug."
9
+ ]
10
+ },
11
+ "date": "2025-03-03",
12
+ "version": "1.68.0"
13
+ },
2
14
  {
3
15
  "children": {
4
16
  "fixes": [
@@ -27,11 +27,12 @@ By setting the environment variables `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` and `CL
27
27
 
28
28
  Before using NextAuth, please set the following variables in LobeChat's environment variables:
29
29
 
30
- | Environment Variable | Type | Description |
31
- | ------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
32
- | `NEXT_AUTH_SECRET` | Required | The key used to encrypt Auth.js session tokens. You can use the following command: `openssl rand -base64 32`, or visit `https://generate-secret.vercel.app/32` to generate the key. |
33
- | `NEXTAUTH_URL` | Required | This URL specifies the callback address for Auth.js when performing OAuth verification. Set this only if the default generated redirect address is incorrect. `https://example.com/api/auth` |
34
- | `NEXT_AUTH_SSO_PROVIDERS` | Optional | This environment variable is used to enable multiple identity verification sources simultaneously, separated by commas, for example, `auth0,microsoft-entra-id,authentik`. |
30
+ | Environment Variable | Type | Description |
31
+ | ------------------------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
32
+ | `NEXT_PUBLIC_ENABLE_NEXT_AUTH` | Required | This is used to enable the NextAuth service. Set it to `1` to enable it; changing this setting requires recompiling the application. Users deploying with the `lobehub/lobe-chat-database` image have this configuration added by default. |
33
+ | `NEXT_AUTH_SECRET` | Required | The key used to encrypt Auth.js session tokens. You can use the following command: `openssl rand -base64 32`, or visit `https://generate-secret.vercel.app/32` to generate the key. |
34
+ | `NEXTAUTH_URL` | Required | This URL specifies the callback address for Auth.js when performing OAuth verification. Set this only if the default generated redirect address is incorrect. `https://example.com/api/auth` |
35
+ | `NEXT_AUTH_SSO_PROVIDERS` | Optional | This environment variable is used to enable multiple identity verification sources simultaneously, separated by commas, for example, `auth0,microsoft-entra-id,authentik`. |
35
36
 
36
37
  Currently supported identity verification services include:
37
38
 
@@ -24,11 +24,12 @@ LobeChat 与 Clerk 做了深度集成,能够为用户提供一个更加安全
24
24
 
25
25
  在使用 NextAuth 之前,请先在 LobeChat 的环境变量中设置以下变量:
26
26
 
27
- | 环境变量 | 类型 | 描述 |
28
- | ------------------------- | -- | ------------------------------------------------------------------------------------------------------------ |
29
- | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令: `openssl rand -base64 32`,或者访问 `https://generate-secret.vercel.app/32` 生成秘钥。 |
30
- | `NEXTAUTH_URL` | 必选 | URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` |
31
- | `NEXT_AUTH_SSO_PROVIDERS` | 可选 | 该环境变量用于同时启用多个身份验证源,以逗号 `,` 分割,例如 `auth0,microsoft-entra-id,authentik`。 |
27
+ | 环境变量 | 类型 | 描述 |
28
+ | ------------------------------ | -- | ------------------------------------------------------------------------------------------------------------ |
29
+ | `NEXT_PUBLIC_ENABLE_NEXT_AUTH` | 必选 | 用于启用 NextAuth 服务,设置为 `1` 以启用,更改此项需要重新编译应用。使用 `lobehub/lobe-chat-database` 镜像部署的用户已经默认添加了该项配置。 |
30
+ | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令: `openssl rand -base64 32`,或者访问 `https://generate-secret.vercel.app/32` 生成秘钥。 |
31
+ | `NEXTAUTH_URL` | 必选 | URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` |
32
+ | `NEXT_AUTH_SSO_PROVIDERS` | 可选 | 该环境变量用于同时启用多个身份验证源,以逗号 `,` 分割,例如 `auth0,microsoft-entra-id,authentik`。 |
32
33
 
33
34
  目前支持的身份验证服务有:
34
35
 
@@ -217,6 +217,22 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
217
217
  - Default: `-`
218
218
  - Example: `-all,+01-ai/yi-34b-chat,+huggingfaceh4/zephyr-7b-beta`
219
219
 
220
+ ## PPIO
221
+
222
+ ### `PPIO_API_KEY`
223
+
224
+ - Type: Required
225
+ - Description: This is your PPIO API Key.
226
+ - Default: -
227
+ - Example: `sk_xxxxxxxxxx`
228
+
229
+ ### `PPIO_MODEL_LIST`
230
+
231
+ - Type: Optional
232
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
233
+ - Default: `-`
234
+ - Example: `-all,+deepseek/deepseek-v3/community,+deepseek/deepseek-r1-distill-llama-70b`
235
+
220
236
  ## Github
221
237
 
222
238
  ### `GITHUB_TOKEN`
@@ -215,6 +215,22 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
215
215
  - 默认值:`-`
216
216
  - 示例:`-all,+01-ai/yi-34b-chat,+huggingfaceh4/zephyr-7b-beta`
217
217
 
218
+ ## PPIO
219
+
220
+ ### `PPIO_API_KEY`
221
+
222
+ - 类型:必选
223
+ - 描述:这是你在 PPIO 网站申请的 API 密钥
224
+ - 默认值:-
225
+ - 示例:`sk_xxxxxxxxxxxx`
226
+
227
+ ### `PPIO_MODEL_LIST`
228
+
229
+ - 类型:可选
230
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
231
+ - 默认值:`-`
232
+ - 示例:`-all,+deepseek/deepseek-v3/community,+deepseek/deepseek-r1-distill-llama-70b`
233
+
218
234
  ## Github
219
235
 
220
236
  ### `GITHUB_TOKEN`
@@ -0,0 +1,57 @@
1
+ ---
2
+ title: Using PPIO API Key in LobeChat
3
+ description: >-
4
+ Learn how to integrate PPIO's language model APIs into LobeChat. Follow
5
+ the steps to register, create a PPIO API key, configure settings, and
6
+ chat with our various AI models.
7
+ tags:
8
+ - PPIO
9
+ - DeepSeek
10
+ - Llama
11
+ - Qwen
12
+ - uncensored
13
+ - API key
14
+ - Web UI
15
+ ---
16
+
17
+ # Using PPIO in LobeChat
18
+
19
+ <Image alt={'Using PPIO in LobeChat'} cover src={''} />
20
+
21
+ [PPIO](https://ppinfra.com?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link) supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.
22
+
23
+ This document will guide you on how to integrate PPIO in LobeChat:
24
+
25
+ <Steps>
26
+ ### Step 1: Register and Log in to PPIO
27
+
28
+ - Visit [PPIO](https://ppinfra.com?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link) and create an account
29
+ - Upon registration, PPIO will provide a ¥5 credit (about 5M tokens).
30
+
31
+ <Image alt={'Register PPIO'} height={457} inStep src={'https://github.com/user-attachments/assets/7cb3019b-78c1-48e0-a64c-a6a4836affd9'} />
32
+
33
+ ### Step 2: Obtain the API Key
34
+
35
+ - Visit PPIO's [key management page](https://ppinfra.com/settings/key-management), create and copy an API Key.
36
+
37
+ <Image alt={'Obtain PPIO API key'} inStep src={'https://github.com/user-attachments/assets/5abcf21d-5a6c-4fc8-8de6-bc47d4d2fa98'} />
38
+
39
+ ### Step 3: Configure PPIO in LobeChat
40
+
41
+ - Visit the `Settings` interface in LobeChat
42
+ - Find the setting for `PPIO` under `Language Model`
43
+
44
+ <Image alt={'Enter PPIO API key in LobeChat'} inStep src={'https://github.com/user-attachments/assets/000d6a5b-f8d4-4fd5-84cd-31556c5c1efd'} />
45
+
46
+ - Open PPIO and enter the obtained API key
47
+ - Choose a PPIO model for your assistant to start the conversation
48
+
49
+ <Image alt={'Select and use PPIO model'} inStep src={'https://github.com/user-attachments/assets/207888f1-df21-4063-8e66-97b0d9cfa02e'} />
50
+
51
+ <Callout type={'warning'}>
52
+ During usage, you may need to pay the API service provider, please refer to PPIO's [pricing
53
+ policy](https://ppinfra.com/llm-api?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link).
54
+ </Callout>
55
+ </Steps>
56
+
57
+ You can now engage in conversations using the models provided by PPIO in LobeChat.
@@ -0,0 +1,55 @@
1
+ ---
2
+ title: 在 LobeChat 中使用 PPIO 派欧云 API Key
3
+ description: >-
4
+ 学习如何将 PPIO 派欧云的 LLM API 集成到 LobeChat 中。跟随以下步骤注册 PPIO 账号、创建 API
5
+ Key、并在 LobeChat 中进行设置。
6
+ tags:
7
+ - PPIO
8
+ - PPInfra
9
+ - DeepSeek
10
+ - Qwen
11
+ - Llama3
12
+ - API key
13
+ - Web UI
14
+ ---
15
+
16
+ # 在 LobeChat 中使用 PPIO 派欧云
17
+
18
+ <Image alt={'在 LobeChat 中使用 PPIO'} cover src={''} />
19
+
20
+ [PPIO 派欧云](https://ppinfra.com?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link)提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。
21
+
22
+ 本文档将指导你如何在 LobeChat 中使用 PPIO:
23
+
24
+ <Steps>
25
+ ### 步骤一:注册 PPIO 派欧云账号并登录
26
+
27
+ - 访问 [PPIO 派欧云](https://ppinfra.com?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link) 并注册账号
28
+ - 注册后,PPIO 会赠送 5 元(约 500 万 tokens)的使用额度
29
+
30
+ <Image alt={'注册 PPIO'} height={457} inStep src={'https://github.com/user-attachments/assets/7cb3019b-78c1-48e0-a64c-a6a4836affd9'} />
31
+
32
+ ### 步骤二:创建 API 密钥
33
+
34
+ - 访问 PPIO 派欧云的 [密钥管理页面](https://ppinfra.com/settings/key-management),创建并复制一个 API 密钥。
35
+
36
+ <Image alt={'创建 PPIO API 密钥'} inStep src={'https://github.com/user-attachments/assets/5abcf21d-5a6c-4fc8-8de6-bc47d4d2fa98'} />
37
+
38
+ ### 步骤三:在 LobeChat 中配置 PPIO 派欧云
39
+
40
+ - 访问 LobeChat 的 `设置` 界面
41
+ - 在 `语言模型` 下找到 `PPIO` 的设置项
42
+ - 打开 PPIO 并填入获得的 API 密钥
43
+
44
+ <Image alt={'在 LobeChat 中输入 PPIO API 密钥'} inStep src={'https://github.com/user-attachments/assets/4eaadac7-595c-41ad-a6e0-64c3105577d7'} />
45
+
46
+ - 为你的助手选择一个 PPIO 模型即可开始对话
47
+
48
+ <Image alt={'选择并使用 PPIO 模型'} inStep src={'https://github.com/user-attachments/assets/8cf66e00-04fe-4bad-9e3d-35afc7d9aa58'} />
49
+
50
+ <Callout type={'warning'}>
51
+ 在使用过程中你可能需要向 API 服务提供商付费,PPIO 的 API 费用参考[这里](https://ppinfra.com/llm-api?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link)。
52
+ </Callout>
53
+ </Steps>
54
+
55
+ 至此你已经可以在 LobeChat 中使用 PPIO 派欧云提供的模型进行对话了。
@@ -139,5 +139,8 @@
139
139
  },
140
140
  "zhipu": {
141
141
  "description": "Zhipu AI offers an open platform for multimodal and language models, supporting a wide range of AI application scenarios, including text processing, image understanding, and programming assistance."
142
+ },
143
+ "ppio": {
144
+ "description": "PPIO supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc."
142
145
  }
143
146
  }
@@ -139,5 +139,9 @@
139
139
  },
140
140
  "zhipu": {
141
141
  "description": "智谱 AI 提供多模态与语言模型的开放平台,支持广泛的AI应用场景,包括文本处理、图像理解与编程辅助等。"
142
+ },
143
+ "ppio": {
144
+ "description": "PPIO 派欧云提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。"
142
145
  }
143
146
  }
147
+
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.67.2",
3
+ "version": "1.68.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -30,11 +30,11 @@
30
30
  ],
31
31
  "scripts": {
32
32
  "build": "next build",
33
- "build:analyze": "ANALYZE=true next build",
34
- "build:docker": "DOCKER=true next build && npm run build-sitemap",
35
33
  "postbuild": "npm run build-sitemap && npm run build-migrate-db",
36
34
  "build-migrate-db": "bun run db:migrate",
37
35
  "build-sitemap": "tsx ./scripts/buildSitemapIndex/index.ts",
36
+ "build:analyze": "ANALYZE=true next build",
37
+ "build:docker": "DOCKER=true next build && npm run build-sitemap",
38
38
  "db:generate": "drizzle-kit generate && npm run db:generate-client",
39
39
  "db:generate-client": "tsx ./scripts/migrateClientDB/compile-migrations.ts",
40
40
  "db:migrate": "MIGRATION_DB=1 tsx ./scripts/migrateServerDB/index.ts",
@@ -62,11 +62,11 @@
62
62
  "start": "next start -p 3210",
63
63
  "stylelint": "stylelint \"src/**/*.{js,jsx,ts,tsx}\" --fix",
64
64
  "test": "npm run test-app && npm run test-server",
65
- "test:update": "vitest -u",
66
65
  "test-app": "vitest run --config vitest.config.ts",
67
66
  "test-app:coverage": "vitest run --config vitest.config.ts --coverage",
68
67
  "test-server": "vitest run --config vitest.server.config.ts",
69
68
  "test-server:coverage": "vitest run --config vitest.server.config.ts --coverage",
69
+ "test:update": "vitest -u",
70
70
  "type-check": "tsc --noEmit",
71
71
  "webhook:ngrok": "ngrok http http://localhost:3011",
72
72
  "workflow:cdn": "tsx ./scripts/cdnWorkflow/index.ts",
@@ -107,7 +107,7 @@
107
107
  "dependencies": {
108
108
  "@ant-design/icons": "^5.5.2",
109
109
  "@ant-design/pro-components": "^2.8.3",
110
- "@anthropic-ai/sdk": "^0.37.0",
110
+ "@anthropic-ai/sdk": "^0.39.0",
111
111
  "@auth/core": "^0.38.0",
112
112
  "@aws-sdk/client-bedrock-runtime": "^3.723.0",
113
113
  "@aws-sdk/client-s3": "^3.723.0",
@@ -0,0 +1,176 @@
1
+ import { describe, expect, it, vi } from 'vitest';
2
+
3
+ import { Crawler } from '../crawler';
4
+
5
+ // Move mocks outside of test cases to avoid hoisting issues
6
+ vi.mock('../crawImpl', () => ({
7
+ crawlImpls: {
8
+ naive: vi.fn(),
9
+ jina: vi.fn(),
10
+ browserless: vi.fn(),
11
+ },
12
+ }));
13
+
14
+ vi.mock('../utils/appUrlRules', () => ({
15
+ applyUrlRules: vi.fn().mockReturnValue({
16
+ transformedUrl: 'https://example.com',
17
+ filterOptions: {},
18
+ }),
19
+ }));
20
+
21
+ describe('Crawler', () => {
22
+ const crawler = new Crawler();
23
+
24
+ it('should crawl successfully with default impls', async () => {
25
+ const mockResult = {
26
+ content: 'test content',
27
+ contentType: 'text' as const,
28
+ url: 'https://example.com',
29
+ };
30
+
31
+ const { crawlImpls } = await import('../crawImpl');
32
+ vi.mocked(crawlImpls.naive).mockResolvedValue(mockResult);
33
+
34
+ const result = await crawler.crawl({
35
+ url: 'https://example.com',
36
+ });
37
+
38
+ expect(result).toEqual({
39
+ crawler: 'naive',
40
+ data: mockResult,
41
+ originalUrl: 'https://example.com',
42
+ transformedUrl: undefined,
43
+ });
44
+ });
45
+
46
+ it('should use user provided impls', async () => {
47
+ const mockResult = {
48
+ content: 'test content',
49
+ contentType: 'text' as const,
50
+ url: 'https://example.com',
51
+ };
52
+
53
+ const { crawlImpls } = await import('../crawImpl');
54
+ vi.mocked(crawlImpls.jina).mockResolvedValue(mockResult);
55
+
56
+ const result = await crawler.crawl({
57
+ impls: ['jina'],
58
+ url: 'https://example.com',
59
+ });
60
+
61
+ expect(result).toEqual({
62
+ crawler: 'jina',
63
+ data: mockResult,
64
+ originalUrl: 'https://example.com',
65
+ transformedUrl: undefined,
66
+ });
67
+ });
68
+
69
+ it('should handle crawl errors', async () => {
70
+ const mockError = new Error('Crawl failed');
71
+ mockError.name = 'CrawlError';
72
+
73
+ const { crawlImpls } = await import('../crawImpl');
74
+ vi.mocked(crawlImpls.naive).mockRejectedValue(mockError);
75
+ vi.mocked(crawlImpls.jina).mockRejectedValue(mockError);
76
+ vi.mocked(crawlImpls.browserless).mockRejectedValue(mockError);
77
+
78
+ const result = await crawler.crawl({
79
+ url: 'https://example.com',
80
+ });
81
+
82
+ expect(result).toEqual({
83
+ content: 'Fail to crawl the page. Error type: CrawlError, error message: Crawl failed',
84
+ errorMessage: 'Crawl failed',
85
+ errorType: 'CrawlError',
86
+ originalUrl: 'https://example.com',
87
+ transformedUrl: undefined,
88
+ });
89
+ });
90
+
91
+ it('should handle transformed urls', async () => {
92
+ const mockResult = {
93
+ content: 'test content',
94
+ contentType: 'text' as const,
95
+ url: 'https://transformed.example.com',
96
+ };
97
+
98
+ const { crawlImpls } = await import('../crawImpl');
99
+ vi.mocked(crawlImpls.naive).mockResolvedValue(mockResult);
100
+
101
+ const { applyUrlRules } = await import('../utils/appUrlRules');
102
+ vi.mocked(applyUrlRules).mockReturnValue({
103
+ transformedUrl: 'https://transformed.example.com',
104
+ filterOptions: {},
105
+ });
106
+
107
+ const result = await crawler.crawl({
108
+ url: 'https://example.com',
109
+ });
110
+
111
+ expect(result).toEqual({
112
+ crawler: 'naive',
113
+ data: mockResult,
114
+ originalUrl: 'https://example.com',
115
+ transformedUrl: 'https://transformed.example.com',
116
+ });
117
+ });
118
+
119
+ it('should merge filter options correctly', async () => {
120
+ const mockResult = {
121
+ content: 'test content',
122
+ contentType: 'text' as const,
123
+ url: 'https://example.com',
124
+ };
125
+
126
+ const { crawlImpls } = await import('../crawImpl');
127
+ const mockCrawlImpl = vi.mocked(crawlImpls.naive).mockResolvedValue(mockResult);
128
+
129
+ const { applyUrlRules } = await import('../utils/appUrlRules');
130
+ vi.mocked(applyUrlRules).mockReturnValue({
131
+ transformedUrl: 'https://example.com',
132
+ filterOptions: { pureText: true },
133
+ });
134
+
135
+ await crawler.crawl({
136
+ url: 'https://example.com',
137
+ filterOptions: { enableReadability: true },
138
+ });
139
+
140
+ expect(mockCrawlImpl).toHaveBeenCalledWith('https://example.com', {
141
+ filterOptions: {
142
+ pureText: true,
143
+ enableReadability: true,
144
+ },
145
+ });
146
+ });
147
+
148
+ it('should use rule impls when provided', async () => {
149
+ const mockResult = {
150
+ content: 'test content',
151
+ contentType: 'text' as const,
152
+ url: 'https://example.com',
153
+ };
154
+
155
+ const { crawlImpls } = await import('../crawImpl');
156
+ vi.mocked(crawlImpls.jina).mockResolvedValue(mockResult);
157
+
158
+ const { applyUrlRules } = await import('../utils/appUrlRules');
159
+ vi.mocked(applyUrlRules).mockReturnValue({
160
+ transformedUrl: 'https://example.com',
161
+ filterOptions: {},
162
+ impls: ['jina'],
163
+ });
164
+
165
+ const result = await crawler.crawl({
166
+ url: 'https://example.com',
167
+ });
168
+
169
+ expect(result).toEqual({
170
+ crawler: 'jina',
171
+ data: mockResult,
172
+ originalUrl: 'https://example.com',
173
+ transformedUrl: undefined,
174
+ });
175
+ });
176
+ });