@lobehub/chat 1.6.4 → 1.6.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +50 -0
  2. package/docs/self-hosting/advanced/sso-providers/auth0.zh-CN.mdx +4 -8
  3. package/docs/usage/agents/agent-organization.mdx +69 -0
  4. package/docs/usage/agents/agent-organization.zh-CN.mdx +67 -0
  5. package/docs/usage/foundation/basic.mdx +55 -0
  6. package/docs/usage/foundation/basic.zh-CN.mdx +56 -0
  7. package/docs/usage/foundation/share.mdx +46 -0
  8. package/docs/usage/foundation/share.zh-CN.mdx +43 -0
  9. package/docs/usage/foundation/text2image.mdx +49 -0
  10. package/docs/usage/foundation/text2image.zh-CN.mdx +46 -0
  11. package/docs/usage/foundation/translate.mdx +40 -0
  12. package/docs/usage/foundation/translate.zh-CN.mdx +39 -0
  13. package/docs/usage/foundation/tts-stt.mdx +47 -0
  14. package/docs/usage/foundation/tts-stt.zh-CN.mdx +45 -0
  15. package/docs/usage/foundation/vision.mdx +49 -0
  16. package/docs/usage/foundation/vision.zh-CN.mdx +45 -0
  17. package/docs/usage/providers/baichuan.mdx +64 -0
  18. package/docs/usage/providers/baichuan.zh-CN.mdx +61 -0
  19. package/docs/usage/providers/taichu.mdx +64 -0
  20. package/docs/usage/providers/taichu.zh-CN.mdx +61 -0
  21. package/package.json +5 -5
  22. package/src/config/modelProviders/deepseek.ts +1 -0
  23. package/src/config/modelProviders/google.ts +6 -18
  24. package/src/config/modelProviders/groq.ts +11 -5
  25. package/src/config/modelProviders/ollama.ts +26 -3
  26. package/src/config/modelProviders/openai.ts +12 -9
  27. package/src/config/modelProviders/perplexity.ts +3 -3
  28. package/src/config/modelProviders/togetherai.ts +2 -2
  29. package/src/config/modelProviders/zeroone.ts +3 -7
  30. package/src/config/modelProviders/zhipu.ts +8 -4
  31. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +2 -2
  32. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +1 -1
  33. package/src/libs/agent-runtime/qwen/index.test.ts +15 -13
  34. package/src/libs/agent-runtime/qwen/index.ts +1 -1
  35. package/src/libs/agent-runtime/togetherai/__snapshots__/index.test.ts.snap +3 -3
  36. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +15 -7
  37. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +1 -1
  38. package/src/libs/agent-runtime/utils/streams/qwen.ts +4 -4
  39. package/src/server/routers/edge/config/__snapshots__/index.test.ts.snap +1 -1
@@ -0,0 +1,45 @@
1
+ ---
2
+ title: LobeChat 文字语音转换功能指南
3
+ description: 了解如何在 LobeChat 中使用文字语音转换功能,包括文字转语音(TTS)和语音转文字(STT),以及设置您喜欢的语音模型。
4
+ tags:
5
+ - LobeChat
6
+ - 文字语音转换
7
+ - TTS
8
+ - STT
9
+ - 语音模型
10
+ ---
11
+
12
+ # 文字语音转换使用指南
13
+
14
+ LobeChat 支持文字语音转换功能,允许用户通过语音输入内容,以及将 AI 输出的内容通过语音播报。
15
+
16
+ ## 文字转语音(TTS)
17
+
18
+ 在对话窗口中选中任意内容,选择`文字转语音`,AI 将通过 TTS 模型对文本内容进行语音播报。
19
+
20
+ <Image
21
+ alt={'TTS'}
22
+ src={'https://github.com/user-attachments/assets/d2714769-15f8-4d70-9128-607134163c52'}
23
+ />
24
+
25
+ ## 语音转文字(STT)
26
+
27
+ 在输入窗口中选择语音输入功能,LobeChat 将您的语音转换为文字并输入到文本框中,完成输入后可以直接发送给 AI。
28
+
29
+ <Image
30
+ alt={'STT'}
31
+ src={'https://github.com/user-attachments/assets/d643af6d-ca0f-4abd-9dd2-977dacecb25d'}
32
+ />
33
+
34
+ ## 文字语音转换设置
35
+
36
+ 你可以在设置中为文字语音转换功能指定您希望使用的模型。
37
+
38
+ <Image
39
+ alt={'TTS 设置'}
40
+ src={'https://github.com/user-attachments/assets/2f7c5c45-ec6a-4393-8fa9-19a4c5f52f7a'}
41
+ />
42
+
43
+ - 打开`设置`面板
44
+ - 找到`文字转语音`设置
45
+ - 选择您所需的语音服务和 AI 模型
@@ -0,0 +1,49 @@
1
+ ---
2
+ title: Enhancing Multimodal Interaction with Visual Recognition Models
3
+ description: >-
4
+ Explore how LobeChat integrates visual recognition capabilities into large
5
+ language models, enabling multimodal interactions for enhanced user
6
+ experiences.
7
+ tags:
8
+ - Visual Recognition
9
+ - Multimodal Interaction
10
+ - Large Language Models
11
+ - LobeChat
12
+ - Custom Model Configuration
13
+ ---
14
+
15
+ # Visual Model User Guide
16
+
17
+ The ecosystem of large language models that support visual recognition is becoming increasingly rich. Starting from `gpt-4-vision`, LobeChat now supports various large language models with visual recognition capabilities, enabling LobeChat to have multimodal interaction capabilities.
18
+
19
+ <Video
20
+ alt={'Visual Model Usage'}
21
+ src={'https://github.com/user-attachments/assets/1c6b4975-bfc3-4470-a934-558ff7a16941'}
22
+ />
23
+
24
+ ## Image Input
25
+
26
+ If the model you are currently using supports visual recognition, you can input image content by uploading a file or dragging the image directly into the input box. The model will automatically recognize the image content and provide feedback based on your prompts.
27
+
28
+ <Image
29
+ alt={'Image Input'}
30
+ src={'https://github.com/user-attachments/assets/e6836560-8b05-4382-b761-d7624da4b0f1'}
31
+ />
32
+
33
+ ## Visual Models
34
+
35
+ In the model list, models with a `👁️` icon next to their names indicate that the model supports visual recognition. Selecting such a model allows you to send image content.
36
+
37
+ <Image
38
+ alt={'Visual Models'}
39
+ src={'https://github.com/user-attachments/assets/fa07a326-04c8-4744-bb93-cef715d1d71f'}
40
+ />
41
+
42
+ ## Custom Model Configuration
43
+
44
+ If you need to add a custom model that is not currently in the list and explicitly supports visual recognition, you can enable the `Visual Recognition` feature in the `Custom Model Configuration` to allow the model to interact with images.
45
+
46
+ <Image
47
+ alt={'Custom Model Configuration'}
48
+ src={'https://github.com/user-attachments/assets/c24718cc-402b-4298-b046-8b4aee610cbc'}
49
+ />
@@ -0,0 +1,45 @@
1
+ ---
2
+ title: 视觉模型使用指南 - 支持多模态交互的大语言模型
3
+ description: "了解如何在LobeChat中使用支持视觉识别功能的大语言模型,通过上传图片或拖拽图片到输入框进行交互,并选择带有\U0001F441️图标的模型进行图片内容交互。"
4
+ tags:
5
+ - 视觉模型
6
+ - 多模态交互
7
+ - 大语言模型
8
+ - 自定义模型配置
9
+ ---
10
+
11
+ # 视觉模型使用指南
12
+
13
+ 当前支持视觉识别的大语言模型生态日益丰富。从 `gpt-4-vision` 开始,LobeChat 开始支持各类具有视觉识别能力的大语言模型,这使得 LobeChat 具备了多模态交互的能力。
14
+
15
+ <Video
16
+ alt={'视觉模型使用'}
17
+ src={'https://github.com/user-attachments/assets/1c6b4975-bfc3-4470-a934-558ff7a16941'}
18
+ />
19
+
20
+ ## 图片输入
21
+
22
+ 如果你当前使用的模型支持视觉识别功能,您可以通过上传文件或直接将图片拖入输入框的方式输入图片内容。模型会自动识别图片内容,并根据您的提示词给出反馈。
23
+
24
+ <Image
25
+ alt={'图片输入'}
26
+ src={'https://github.com/user-attachments/assets/e6836560-8b05-4382-b761-d7624da4b0f1'}
27
+ />
28
+
29
+ ## 视觉模型
30
+
31
+ 在模型列表中,模型名称后面带有`👁️`图标表示该模型支持视觉识别功能。选择该模型后即可发送图片内容。
32
+
33
+ <Image
34
+ alt={'视觉模型'}
35
+ src={'https://github.com/user-attachments/assets/fa07a326-04c8-4744-bb93-cef715d1d71f'}
36
+ />
37
+
38
+ ## 自定义模型配置
39
+
40
+ 如果您需要添加当前列表中没有的自定义模型,并且该模型明确支持视觉识别功能,您可以在`自定义模型配置`中开启`视觉识别`功能,使该模型能够与图片进行交互。
41
+
42
+ <Image
43
+ alt={'自定义模型配置'}
44
+ src={'https://github.com/user-attachments/assets/c24718cc-402b-4298-b046-8b4aee610cbc'}
45
+ />
@@ -0,0 +1,64 @@
1
+ ---
2
+ title: Using Baichuan API Key in LobeChat
3
+ description: >-
4
+ Learn how to integrate Baichuan AI into LobeChat for enhanced conversational
5
+ experiences. Follow the steps to configure Baichuan AI and start using its
6
+ models.
7
+ tags:
8
+ - LobeChat
9
+ - Baichuan
10
+ - API Key
11
+ - Web UI
12
+ ---
13
+
14
+ # Using Baichuan in LobeChat
15
+
16
+ <Image
17
+ alt={'Using Baichuan in LobeChat'}
18
+ cover
19
+ src={'https://github.com/user-attachments/assets/d961f2af-47b0-4806-8288-b1e8f7ee8a47'}
20
+ />
21
+
22
+ This article will guide you on how to use Baichuan in LobeChat:
23
+
24
+ <Steps>
25
+
26
+ ### Step 1: Obtain Baichuan Intelligent API Key
27
+
28
+ - Create a [Baichuan Intelligent](https://platform.baichuan-ai.com/homePage) account
29
+ - Create and obtain an [API key](https://platform.baichuan-ai.com/console/apikey)
30
+
31
+ <Image
32
+ alt={'Create API Key'}
33
+ inStep
34
+ src={'https://github.com/user-attachments/assets/8787716c-833e-44ab-b506-922ddb6121de'}
35
+ />
36
+
37
+ ### Step 2: Configure Baichuan in LobeChat
38
+
39
+ - Visit the `Settings` interface in LobeChat
40
+ - Find the setting for `Baichuan` under `Language Model`
41
+
42
+ <Image
43
+ alt={'Enter API Key'}
44
+ inStep
45
+ src={'https://github.com/user-attachments/assets/dec6665a-b3ec-4c50-a57f-7c7eb3160e7b'}
46
+ />
47
+
48
+ - Enter the obtained API key
49
+ - Choose a Baichuan model for your AI assistant to start the conversation
50
+
51
+ <Image
52
+ alt={'Select Baichuan model and start conversation'}
53
+ inStep
54
+ src={'https://github.com/user-attachments/assets/bfda556a-d3fc-409f-8647-e718788f2fb8'}
55
+ />
56
+
57
+ <Callout type={'warning'}>
58
+ During usage, you may need to pay the API service provider, please refer to Baichuan's relevant
59
+ pricing policies.
60
+ </Callout>
61
+
62
+ </Steps>
63
+
64
+ You can now use the models provided by Baichuan for conversation in LobeChat.
@@ -0,0 +1,61 @@
1
+ ---
2
+ title: 在 LobeChat 中使用百川 API Key
3
+ description: 学习如何在 LobeChat 中配置和使用百川的API Key,以便开始对话和交互。
4
+ tags:
5
+ - LobeChat
6
+ - 百川
7
+ - 百川智能
8
+ - API密钥
9
+ - Web UI
10
+ ---
11
+
12
+ # 在 LobeChat 中使用百川
13
+
14
+ <Image
15
+ alt={'在 LobeChat 中使用百川'}
16
+ cover
17
+ src={'https://github.com/user-attachments/assets/d961f2af-47b0-4806-8288-b1e8f7ee8a47'}
18
+ />
19
+
20
+ 本文将指导你如何在 LobeChat 中使用百川:
21
+
22
+ <Steps>
23
+
24
+ ### 步骤一:获取百川智能 API 密钥
25
+
26
+ - 创建一个[百川智能](https://platform.baichuan-ai.com/homePage)账户
27
+ - 创建并获取 [API 密钥](https://platform.baichuan-ai.com/console/apikey)
28
+
29
+ <Image
30
+ alt={'创建 API Key'}
31
+ inStep
32
+ src={'https://github.com/user-attachments/assets/8787716c-833e-44ab-b506-922ddb6121de'}
33
+ />
34
+
35
+ ### 步骤二:在 LobeChat 中配置百川
36
+
37
+ - 访问 LobeChat 的`设置`界面
38
+ - 在`语言模型`下找到`百川`的设置项
39
+
40
+ <Image
41
+ alt={'填入 API 密钥'}
42
+ inStep
43
+ src={'https://github.com/user-attachments/assets/dec6665a-b3ec-4c50-a57f-7c7eb3160e7b'}
44
+ />
45
+
46
+ - 填入获得的 API 密钥
47
+ - 为你的 AI 助手选择一个百川的模型即可开始对话
48
+
49
+ <Image
50
+ alt={'选择百川模型并开始对话'}
51
+ inStep
52
+ src={'https://github.com/user-attachments/assets/bfda556a-d3fc-409f-8647-e718788f2fb8'}
53
+ />
54
+
55
+ <Callout type={'warning'}>
56
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考百川的相关费用政策。
57
+ </Callout>
58
+
59
+ </Steps>
60
+
61
+ 至此你已经可以在 LobeChat 中使用百川提供的模型进行对话了。
@@ -0,0 +1,64 @@
1
+ ---
2
+ title: Using Taichu API Key in LobeChat
3
+ description: >-
4
+ Learn how to integrate Taichu AI into LobeChat for enhanced conversational
5
+ experiences. Follow the steps to configure Taichu AI and start using its
6
+ models.
7
+ tags:
8
+ - LobeChat
9
+ - Taichu
10
+ - API Key
11
+ - Web UI
12
+ ---
13
+
14
+ # Using Taichu in LobeChat
15
+
16
+ <Image
17
+ alt={'Using Taichu in LobeChat'}
18
+ cover
19
+ src={'https://github.com/user-attachments/assets/9cb27b68-f2ac-4ff9-8f97-d96314b1af03'}
20
+ />
21
+
22
+ This article will guide you on how to use Taichu in LobeChat:
23
+
24
+ <Steps>
25
+
26
+ ### Step 1: Obtain Taichu API Key
27
+
28
+ - Create an account on [Taichu](https://ai-maas.wair.ac.cn/)
29
+ - Create and obtain an [API key](https://ai-maas.wair.ac.cn/#/settlement/api/key)
30
+
31
+ <Image
32
+ alt={'Create API Key'}
33
+ inStep
34
+ src={'https://github.com/user-attachments/assets/8d90ae64-cf8e-4d90-8a31-c18ab484740b'}
35
+ />
36
+
37
+ ### Step 2: Configure Taichu in LobeChat
38
+
39
+ - Go to the `Settings` interface in LobeChat
40
+ - Find the setting for `Taichu` under `Language Model`
41
+
42
+ <Image
43
+ alt={'Enter API Key'}
44
+ inStep
45
+ src={'https://github.com/user-attachments/assets/55028fe5-44db-49e2-93c5-5dabbd664f10'}
46
+ />
47
+
48
+ - Enter the obtained API key
49
+ - Choose a Purple Taichu model for your AI assistant to start the conversation
50
+
51
+ <Image
52
+ alt={'Select Tai Chi model and start conversation'}
53
+ inStep
54
+ src={'https://github.com/user-attachments/assets/c44b6894-70cb-4876-b792-2e76e75ac542'}
55
+ />
56
+
57
+ <Callout type={'warning'}>
58
+ During usage, you may need to pay the API service provider, please refer to Taichu's relevant
59
+ pricing policies.
60
+ </Callout>
61
+
62
+ </Steps>
63
+
64
+ Now you can start conversing with the models provided by Taichu in LobeChat.
@@ -0,0 +1,61 @@
1
+ ---
2
+ title: 在 LobeChat 中使用紫东太初 API Key
3
+ description: 学习如何在 LobeChat 中配置和使用紫东太初的API Key,以便开始对话和交互。
4
+ tags:
5
+ - LobeChat
6
+ - 太初
7
+ - 紫东太初
8
+ - API密钥
9
+ - Web UI
10
+ ---
11
+
12
+ # 在 LobeChat 中使用紫东太初
13
+
14
+ <Image
15
+ alt={'在 LobeChat 中使用太初'}
16
+ cover
17
+ src={'https://github.com/user-attachments/assets/9cb27b68-f2ac-4ff9-8f97-d96314b1af03'}
18
+ />
19
+
20
+ 本文将指导你如何在 LobeChat 中使用紫东太初:
21
+
22
+ <Steps>
23
+
24
+ ### 步骤一:获取紫东太初 API 密钥
25
+
26
+ - 创建一个[紫东太初](https://ai-maas.wair.ac.cn/)账户
27
+ - 创建并获取 [API 密钥](https://ai-maas.wair.ac.cn/#/settlement/api/key)
28
+
29
+ <Image
30
+ alt={'创建 API Key'}
31
+ inStep
32
+ src={'https://github.com/user-attachments/assets/8d90ae64-cf8e-4d90-8a31-c18ab484740b'}
33
+ />
34
+
35
+ ### 步骤二:在 LobeChat 中配置紫东太初
36
+
37
+ - 访问 LobeChat 的`设置`界面
38
+ - 在`语言模型`下找到`紫东太初`的设置项
39
+
40
+ <Image
41
+ alt={'填入 API 密钥'}
42
+ inStep
43
+ src={'https://github.com/user-attachments/assets/55028fe5-44db-49e2-93c5-5dabbd664f10'}
44
+ />
45
+
46
+ - 填入获得的 API 密钥
47
+ - 为你的 AI 助手选择一个紫东太初的模型即可开始对话
48
+
49
+ <Image
50
+ alt={'选择太初模型并开始对话'}
51
+ inStep
52
+ src={'https://github.com/user-attachments/assets/c44b6894-70cb-4876-b792-2e76e75ac542'}
53
+ />
54
+
55
+ <Callout type={'warning'}>
56
+ 在使用过程中你可能需要向 API 服务提供商付费,请参考紫东太初的相关费用政策。
57
+ </Callout>
58
+
59
+ </Steps>
60
+
61
+ 至此你已经可以在 LobeChat 中使用紫东太初提供的模型进行对话了。
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.6.4",
3
+ "version": "1.6.6",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -113,9 +113,9 @@
113
113
  "@khmyznikov/pwa-install": "^0.3.9",
114
114
  "@lobehub/chat-plugin-sdk": "^1.32.4",
115
115
  "@lobehub/chat-plugins-gateway": "^1.9.0",
116
- "@lobehub/icons": "^1.26.0",
116
+ "@lobehub/icons": "^1.27.0",
117
117
  "@lobehub/tts": "^1.24.3",
118
- "@lobehub/ui": "^1.146.6",
118
+ "@lobehub/ui": "^1.146.9",
119
119
  "@microsoft/fetch-event-source": "^2.0.1",
120
120
  "@neondatabase/serverless": "^0.9.4",
121
121
  "@next/third-parties": "^14.2.4",
@@ -138,7 +138,7 @@
138
138
  "debug": "^4.3.5",
139
139
  "dexie": "^3.2.7",
140
140
  "diff": "^5.2.0",
141
- "drizzle-orm": "^0.31.2",
141
+ "drizzle-orm": "^0.32.0",
142
142
  "drizzle-zod": "^0.5.1",
143
143
  "fast-deep-equal": "^3.1.3",
144
144
  "gpt-tokenizer": "^2.1.2",
@@ -241,7 +241,7 @@
241
241
  "consola": "^3.2.3",
242
242
  "dotenv": "^16.4.5",
243
243
  "dpdm": "^3.14.0",
244
- "drizzle-kit": "^0.22.8",
244
+ "drizzle-kit": "^0.23.0",
245
245
  "eslint": "^8.57.0",
246
246
  "eslint-plugin-mdx": "^2.3.4",
247
247
  "fake-indexeddb": "^6.0.0",
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://platform.deepseek.com/api-docs/pricing
3
4
  const DeepSeek: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -43,14 +43,16 @@ const Google: ModelProviderCard = {
43
43
  vision: true,
44
44
  },
45
45
  {
46
- description: 'The best model for scaling across a wide range of tasks. This is the latest model.',
46
+ description:
47
+ 'The best model for scaling across a wide range of tasks. This is the latest model.',
47
48
  displayName: 'Gemini 1.0 Pro',
48
49
  id: 'gemini-1.0-pro-latest',
49
50
  maxOutput: 2048,
50
51
  tokens: 30_720 + 2048,
51
52
  },
52
53
  {
53
- description: 'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
54
+ description:
55
+ 'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
54
56
  displayName: 'Gemini 1.0 Pro 001 (Tuning)',
55
57
  functionCall: true,
56
58
  id: 'gemini-1.0-pro-001',
@@ -58,27 +60,13 @@ const Google: ModelProviderCard = {
58
60
  tokens: 30_720 + 2048,
59
61
  },
60
62
  {
61
- description: 'The best model for scaling across a wide range of tasks. Released April 9, 2024.',
63
+ description:
64
+ 'The best model for scaling across a wide range of tasks. Released April 9, 2024.',
62
65
  displayName: 'Gemini 1.0 Pro 002 (Tuning)',
63
66
  id: 'gemini-1.0-pro-002',
64
67
  maxOutput: 2048,
65
68
  tokens: 30_720 + 2048,
66
69
  },
67
- {
68
- description: 'The most capable model for highly complex tasks',
69
- displayName: 'Gemini 1.0 Ultra',
70
- id: 'gemini-ultra',
71
- maxOutput: 2048,
72
- tokens: 32_768,
73
- },
74
- {
75
- description: 'A legacy text-only model optimized for chat conversations',
76
- displayName: 'PaLM 2 Chat (Legacy)',
77
- id: 'chat-bison-001',
78
- legacy: true,
79
- maxOutput: 1024,
80
- // tokens: 4096 + 1024, // none tokens test
81
- }
82
70
  ],
83
71
  checkModel: 'gemini-1.5-flash',
84
72
  id: 'google',
@@ -4,7 +4,7 @@ import { ModelProviderCard } from '@/types/llm';
4
4
  const Groq: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
- displayName: 'LLaMA3-3-70B',
7
+ displayName: 'LLaMA3 70B',
8
8
  enabled: true,
9
9
  functionCall: true,
10
10
  id: 'llama3-70b-8192',
@@ -18,14 +18,20 @@ const Groq: ModelProviderCard = {
18
18
  tokens: 32_768,
19
19
  },
20
20
  {
21
- displayName: 'Gemma-7b-it',
22
- enabled: true,
21
+ displayName: 'Gemma 7B',
23
22
  functionCall: true,
24
23
  id: 'gemma-7b-it',
25
24
  tokens: 8192,
26
25
  },
27
26
  {
28
- displayName: 'LLaMA3-3-8B',
27
+ displayName: 'Gemma2 9B',
28
+ enabled: true,
29
+ functionCall: true,
30
+ id: 'gemma2-9b-it',
31
+ tokens: 8192,
32
+ },
33
+ {
34
+ displayName: 'LLaMA3 8B',
29
35
  enabled: true,
30
36
  functionCall: true,
31
37
  id: 'llama3-8b-8192',
@@ -37,7 +43,7 @@ const Groq: ModelProviderCard = {
37
43
  tokens: 4096,
38
44
  },
39
45
  ],
40
- checkModel: 'gemma-7b-it',
46
+ checkModel: 'gemma2-9b-it',
41
47
  id: 'groq',
42
48
  name: 'Groq',
43
49
  proxyUrl: {
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://ollama.com/library
3
4
  const Ollama: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -78,6 +79,28 @@ const Ollama: ModelProviderCard = {
78
79
  id: 'gemma:2b',
79
80
  tokens: 8192,
80
81
  },
82
+ {
83
+ displayName: 'Deepseek V2 16B',
84
+ enabled: true,
85
+ id: 'deepseek-v2',
86
+ tokens: 32_000,
87
+ },
88
+ {
89
+ displayName: 'Deepseek V2 236B',
90
+ id: 'deepseek-v2:236b',
91
+ tokens: 128_000,
92
+ },
93
+ {
94
+ displayName: 'Deepseek Coder V2 16B', // https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
95
+ enabled: true,
96
+ id: 'deepseek-coder-v2',
97
+ tokens: 128_000,
98
+ },
99
+ {
100
+ displayName: 'Deepseek Coder V2 236B', // https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
101
+ id: 'deepseek-coder-v2:236b',
102
+ tokens: 128_000,
103
+ },
81
104
  {
82
105
  displayName: 'Llama2 Chat 13B',
83
106
  id: 'llama2:13b',
@@ -135,10 +158,10 @@ const Ollama: ModelProviderCard = {
135
158
  tokens: 16_384,
136
159
  },
137
160
  {
138
- displayName: 'Mistral',
161
+ displayName: 'MathΣtral',
139
162
  enabled: true,
140
- id: 'mistral',
141
- tokens: 32_768, // https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/config.json
163
+ id: 'mathstral',
164
+ tokens: 32_000, // https://huggingface.co/mistralai/mathstral-7B-v0.1
142
165
  },
143
166
  {
144
167
  displayName: 'Mixtral 8x7B',
@@ -1,6 +1,8 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref https://platform.openai.com/docs/models
3
+ // ref:
4
+ // https://platform.openai.com/docs/models
5
+ // https://platform.openai.com/docs/deprecations
4
6
  const OpenAI: ModelProviderCard = {
5
7
  chatModels: [
6
8
  {
@@ -53,14 +55,14 @@ const OpenAI: ModelProviderCard = {
53
55
  tokens: 128_000,
54
56
  },
55
57
  {
56
- description: 'Currently points to gpt-4-1106-vision-preview',
58
+ description: 'Currently points to gpt-4-1106-vision-preview', // Will be discontinued on December 6, 2024
57
59
  displayName: 'GPT-4 Turbo Vision Preview',
58
60
  id: 'gpt-4-vision-preview',
59
61
  tokens: 128_000,
60
62
  vision: true,
61
63
  },
62
64
  {
63
- displayName: 'GPT-4 Turbo Vision Preview (1106)',
65
+ displayName: 'GPT-4 Turbo Vision Preview (1106)', // Will be discontinued on December 6, 2024
64
66
  id: 'gpt-4-1106-vision-preview',
65
67
  tokens: 128_000,
66
68
  vision: true,
@@ -85,20 +87,21 @@ const OpenAI: ModelProviderCard = {
85
87
  tokens: 8192,
86
88
  },
87
89
  {
88
- description: 'Currently points to gpt-4-32k-0613',
90
+ description: 'Currently points to gpt-4-32k-0613', // Will be discontinued on June 6, 2025
89
91
  displayName: 'GPT-4 32K',
90
92
  functionCall: true,
91
93
  id: 'gpt-4-32k',
92
94
  tokens: 32_768,
93
95
  },
94
96
  {
95
- displayName: 'GPT-4 32K (0613)',
97
+ displayName: 'GPT-4 32K (0613)', // Will be discontinued on June 6, 2025
96
98
  functionCall: true,
97
99
  id: 'gpt-4-32k-0613',
98
100
  tokens: 32_768,
99
101
  },
100
102
  {
101
- description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
103
+ description:
104
+ 'GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125',
102
105
  displayName: 'GPT-3.5 Turbo',
103
106
  functionCall: true,
104
107
  id: 'gpt-3.5-turbo',
@@ -122,20 +125,20 @@ const OpenAI: ModelProviderCard = {
122
125
  tokens: 4096,
123
126
  },
124
127
  {
125
- description: 'Currently points to gpt-3.5-turbo-16k-0613',
128
+ description: 'Currently points to gpt-3.5-turbo-16k-0613', // Will be discontinued on September 13, 2024
126
129
  displayName: 'GPT-3.5 Turbo 16K',
127
130
  id: 'gpt-3.5-turbo-16k',
128
131
  legacy: true,
129
132
  tokens: 16_385,
130
133
  },
131
134
  {
132
- displayName: 'GPT-3.5 Turbo (0613)',
135
+ displayName: 'GPT-3.5 Turbo (0613)', // Will be discontinued on September 13, 2024
133
136
  id: 'gpt-3.5-turbo-0613',
134
137
  legacy: true,
135
138
  tokens: 4096,
136
139
  },
137
140
  {
138
- displayName: 'GPT-3.5 Turbo 16K (0613)',
141
+ description: 'Currently points to gpt-3.5-turbo-16k-0613', // Will be discontinued on September 13, 2024
139
142
  id: 'gpt-3.5-turbo-16k-0613',
140
143
  legacy: true,
141
144
  tokens: 16_385,
@@ -4,7 +4,7 @@ import { ModelProviderCard } from '@/types/llm';
4
4
  const Perplexity: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
- displayName: 'Perplexity 7B Chat',
7
+ displayName: 'Perplexity 8B Chat',
8
8
  id: 'llama-3-sonar-small-32k-chat',
9
9
  tokens: 32_768,
10
10
  },
@@ -15,7 +15,7 @@ const Perplexity: ModelProviderCard = {
15
15
  tokens: 32_768,
16
16
  },
17
17
  {
18
- displayName: 'Perplexity 7B Online',
18
+ displayName: 'Perplexity 8B Online',
19
19
  id: 'llama-3-sonar-small-32k-online',
20
20
  tokens: 28_000,
21
21
  },
@@ -41,7 +41,7 @@ const Perplexity: ModelProviderCard = {
41
41
  tokens: 16_384,
42
42
  },
43
43
  ],
44
- checkModel: 'pplx-7b-chat',
44
+ checkModel: 'llama-3-8b-instruct',
45
45
  id: 'perplexity',
46
46
  name: 'Perplexity',
47
47
  proxyUrl: {