@lobehub/chat 1.124.0 → 1.124.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +5 -0
- package/.github/scripts/pr-comment.js +11 -2
- package/.github/workflows/desktop-pr-build.yml +86 -12
- package/.github/workflows/release-desktop-beta.yml +91 -20
- package/CHANGELOG.md +58 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/Dockerfile.pglite +2 -0
- package/apps/desktop/electron-builder.js +8 -4
- package/changelog/v1.json +21 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +18 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +20 -0
- package/locales/ar/chat.json +2 -0
- package/locales/bg-BG/chat.json +2 -0
- package/locales/de-DE/chat.json +2 -0
- package/locales/en-US/chat.json +2 -0
- package/locales/es-ES/chat.json +2 -0
- package/locales/fa-IR/chat.json +2 -0
- package/locales/fr-FR/chat.json +2 -0
- package/locales/it-IT/chat.json +2 -0
- package/locales/ja-JP/chat.json +2 -0
- package/locales/ko-KR/chat.json +2 -0
- package/locales/nl-NL/chat.json +2 -0
- package/locales/pl-PL/chat.json +2 -0
- package/locales/pt-BR/chat.json +2 -0
- package/locales/ru-RU/chat.json +2 -0
- package/locales/tr-TR/chat.json +2 -0
- package/locales/vi-VN/chat.json +2 -0
- package/locales/zh-CN/chat.json +2 -0
- package/locales/zh-CN/modelProvider.json +1 -1
- package/locales/zh-TW/chat.json +2 -0
- package/package.json +1 -1
- package/packages/const/src/hotkeys.ts +1 -1
- package/packages/const/src/index.ts +1 -0
- package/packages/const/src/settings/hotkey.ts +3 -2
- package/packages/const/src/trace.ts +1 -1
- package/packages/const/src/user.ts +1 -2
- package/packages/database/src/client/db.test.ts +19 -13
- package/packages/electron-server-ipc/src/ipcClient.test.ts +783 -1
- package/packages/file-loaders/src/loadFile.test.ts +61 -0
- package/packages/file-loaders/src/utils/isTextReadableFile.test.ts +43 -0
- package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
- package/packages/model-bank/src/aiModels/aihubmix.ts +38 -4
- package/packages/model-bank/src/aiModels/groq.ts +26 -8
- package/packages/model-bank/src/aiModels/hunyuan.ts +3 -3
- package/packages/model-bank/src/aiModels/modelscope.ts +13 -2
- package/packages/model-bank/src/aiModels/moonshot.ts +25 -5
- package/packages/model-bank/src/aiModels/novita.ts +40 -9
- package/packages/model-bank/src/aiModels/openrouter.ts +0 -13
- package/packages/model-bank/src/aiModels/qwen.ts +62 -1
- package/packages/model-bank/src/aiModels/siliconcloud.ts +20 -0
- package/packages/model-bank/src/aiModels/volcengine.ts +141 -15
- package/packages/model-runtime/package.json +2 -1
- package/packages/model-runtime/src/ai21/index.test.ts +2 -2
- package/packages/model-runtime/src/ai360/index.test.ts +2 -2
- package/packages/model-runtime/src/akashchat/index.test.ts +19 -0
- package/packages/model-runtime/src/anthropic/index.test.ts +1 -2
- package/packages/model-runtime/src/baichuan/index.test.ts +1 -2
- package/packages/model-runtime/src/bedrock/index.test.ts +1 -2
- package/packages/model-runtime/src/bfl/createImage.test.ts +1 -2
- package/packages/model-runtime/src/bfl/index.test.ts +1 -2
- package/packages/model-runtime/src/cloudflare/index.test.ts +1 -2
- package/packages/model-runtime/src/cohere/index.test.ts +19 -0
- package/packages/model-runtime/src/deepseek/index.test.ts +2 -2
- package/packages/model-runtime/src/fireworksai/index.test.ts +2 -2
- package/packages/model-runtime/src/giteeai/index.test.ts +2 -2
- package/packages/model-runtime/src/github/index.test.ts +2 -2
- package/packages/model-runtime/src/google/createImage.test.ts +1 -2
- package/packages/model-runtime/src/google/index.test.ts +1 -1
- package/packages/model-runtime/src/groq/index.test.ts +2 -3
- package/packages/model-runtime/src/huggingface/index.test.ts +40 -0
- package/packages/model-runtime/src/hunyuan/index.test.ts +2 -3
- package/packages/model-runtime/src/internlm/index.test.ts +2 -2
- package/packages/model-runtime/src/jina/index.test.ts +19 -0
- package/packages/model-runtime/src/lmstudio/index.test.ts +2 -2
- package/packages/model-runtime/src/minimax/index.test.ts +19 -0
- package/packages/model-runtime/src/mistral/index.test.ts +2 -3
- package/packages/model-runtime/src/modelscope/index.test.ts +19 -0
- package/packages/model-runtime/src/moonshot/index.test.ts +1 -2
- package/packages/model-runtime/src/nebius/index.test.ts +19 -0
- package/packages/model-runtime/src/newapi/index.test.ts +49 -42
- package/packages/model-runtime/src/newapi/index.ts +124 -143
- package/packages/model-runtime/src/novita/index.test.ts +3 -4
- package/packages/model-runtime/src/nvidia/index.test.ts +19 -0
- package/packages/model-runtime/src/openrouter/index.test.ts +2 -3
- package/packages/model-runtime/src/perplexity/index.test.ts +2 -3
- package/packages/model-runtime/src/ppio/index.test.ts +3 -4
- package/packages/model-runtime/src/qwen/index.test.ts +2 -2
- package/packages/model-runtime/src/sambanova/index.test.ts +19 -0
- package/packages/model-runtime/src/search1api/index.test.ts +19 -0
- package/packages/model-runtime/src/sensenova/index.test.ts +2 -2
- package/packages/model-runtime/src/spark/index.test.ts +2 -2
- package/packages/model-runtime/src/stepfun/index.test.ts +2 -2
- package/packages/model-runtime/src/taichu/index.test.ts +4 -5
- package/packages/model-runtime/src/tencentcloud/index.test.ts +1 -1
- package/packages/model-runtime/src/togetherai/index.test.ts +1 -2
- package/packages/model-runtime/src/upstage/index.test.ts +1 -2
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +9 -7
- package/packages/model-runtime/src/utils/streams/anthropic.ts +2 -2
- package/packages/model-runtime/src/utils/streams/openai/openai.ts +20 -13
- package/packages/model-runtime/src/utils/streams/openai/responsesStream.test.ts +1 -2
- package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts +2 -2
- package/packages/model-runtime/src/utils/streams/protocol.ts +2 -2
- package/packages/model-runtime/src/wenxin/index.test.ts +2 -3
- package/packages/model-runtime/src/xai/index.test.ts +2 -2
- package/packages/model-runtime/src/zeroone/index.test.ts +1 -2
- package/packages/model-runtime/src/zhipu/index.test.ts +2 -3
- package/packages/model-runtime/vitest.config.mts +0 -7
- package/packages/types/src/index.ts +2 -0
- package/packages/types/src/message/base.ts +1 -1
- package/packages/types/src/openai/chat.ts +2 -3
- package/packages/utils/package.json +2 -1
- package/packages/utils/src/_deprecated/parseModels.test.ts +1 -1
- package/packages/utils/src/_deprecated/parseModels.ts +1 -1
- package/packages/utils/src/client/topic.test.ts +1 -2
- package/packages/utils/src/client/topic.ts +1 -2
- package/packages/utils/src/electron/desktopRemoteRPCFetch.ts +1 -1
- package/packages/utils/src/fetch/fetchSSE.ts +7 -8
- package/packages/utils/src/fetch/parseError.ts +1 -3
- package/packages/utils/src/format.test.ts +1 -2
- package/packages/utils/src/index.ts +1 -0
- package/packages/utils/src/toolManifest.ts +1 -2
- package/packages/utils/src/trace.ts +1 -1
- package/packages/utils/vitest.config.mts +1 -1
- package/packages/web-crawler/src/__tests__/urlRules.test.ts +275 -0
- package/packages/web-crawler/src/crawImpl/__tests__/exa.test.ts +269 -0
- package/packages/web-crawler/src/crawImpl/__tests__/firecrawl.test.ts +284 -0
- package/packages/web-crawler/src/crawImpl/__tests__/naive.test.ts +234 -0
- package/packages/web-crawler/src/crawImpl/__tests__/tavily.test.ts +359 -0
- package/packages/web-crawler/src/utils/__tests__/errorType.test.ts +217 -0
- package/packages/web-crawler/vitest.config.mts +3 -0
- package/scripts/electronWorkflow/mergeMacReleaseFiles.ts +207 -0
- package/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx +1 -1
- package/src/components/Thinking/index.tsx +2 -3
- package/src/config/llm.ts +8 -0
- package/src/features/ChatInput/Desktop/index.tsx +16 -4
- package/src/features/ChatInput/StoreUpdater.tsx +2 -0
- package/src/libs/traces/index.ts +1 -1
- package/src/locales/default/chat.ts +1 -0
- package/src/locales/default/modelProvider.ts +1 -1
- package/src/server/modules/ModelRuntime/trace.ts +1 -2
- package/src/store/chat/slices/aiChat/actions/__tests__/cancel-functionality.test.ts +107 -0
- package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChatV2.test.ts +352 -7
- package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +2 -1
- package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +0 -113

package/packages/model-bank/src/aiModels/qwen.ts

@@ -601,6 +601,68 @@ const qwenChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    config: {
+      deploymentName: 'qwen3-max-preview',
+    },
+    contextWindowTokens: 262_144,
+    description:
+      '通义千问3系列Max模型Preview版本,相较2.5系列整体通用能力有大幅度提升,中英文通用文本理解能力、复杂指令遵循能力、主观开放任务能力、多语言能力、工具调用能力均显著增强;模型知识幻觉更少。',
+    displayName: 'Qwen3 Max Preview',
+    enabled: true,
+    id: 'qwen3-max-preview',
+    maxOutput: 32_768,
+    organization: 'Qwen',
+    pricing: {
+      currency: 'CNY',
+      units: [
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 6 * 0.2,
+              '[32_000, 128_000]': 10 * 0.2,
+              '[128_000, infinity]': 15 * 0.2,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput_cacheRead',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 6,
+              '[32_000, 128_000]': 10,
+              '[128_000, infinity]': 15,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 24,
+              '[32_000, 128_000]': 40,
+              '[128_000, infinity]': 60,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textOutput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-09-05',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -613,7 +675,6 @@ const qwenChatModels: AIChatModelCard[] = [
     description:
       '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,当前通义千问2.5产品版本背后的API模型。',
     displayName: 'Qwen Max',
-    enabled: true,
     id: 'qwen-max',
     maxOutput: 8192,
     organization: 'Qwen',
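
The tiered entries above use the `lookup` pricing strategy: every price is keyed by a token-count range such as `'[0, 32_000]'`, and `pricingParams` names the dimension (here `textInputRange`) that selects the tier. As a rough illustration of how such a unit could be resolved, here is a minimal TypeScript sketch; it only mirrors the key format visible in the diff and is not code shipped in @lobehub/chat:

```ts
// Hypothetical sketch: resolving a tiered `lookup` pricing unit.
// The '[min, max]' key format and field names mirror the diff above;
// the resolver itself is illustrative, not part of @lobehub/chat.
interface LookupPricingUnit {
  lookup: { prices: Record<string, number>; pricingParams: string[] };
  name: string;
  strategy: 'lookup';
  unit: 'millionTokens';
}

// Parse a key like '[32_000, 128_000]' or '[128_000, infinity]' into numeric bounds.
const parseRange = (key: string): [number, number] => {
  const [min, max] = key
    .replace(/[[\]\s]/g, '')
    .split(',')
    .map((part) => (part === 'infinity' ? Infinity : Number(part.replaceAll('_', ''))));
  return [min, max];
};

// Pick the price whose range contains the given token count.
const resolvePrice = (unit: LookupPricingUnit, tokens: number): number | undefined => {
  for (const [range, price] of Object.entries(unit.lookup.prices)) {
    const [min, max] = parseRange(range);
    if (tokens >= min && tokens < max) return price;
  }
  return undefined;
};

// Example with the qwen3-max-preview textInput unit: a 50k-token prompt
// falls into '[32_000, 128_000]', i.e. 10 CNY per million input tokens.
const textInput: LookupPricingUnit = {
  lookup: {
    prices: { '[0, 32_000]': 6, '[32_000, 128_000]': 10, '[128_000, infinity]': 15 },
    pricingParams: ['textInputRange'],
  },
  name: 'textInput',
  strategy: 'lookup',
  unit: 'millionTokens',
};

console.log(resolvePrice(textInput, 50_000)); // 10
```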

package/packages/model-bank/src/aiModels/siliconcloud.ts

@@ -47,6 +47,26 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 256_000,
+    description:
+      'Seed-OSS 是由字节跳动 Seed 团队开发的一系列开源大型语言模型,专为强大的长上下文处理、推理、智能体(agent)和通用能力而设计。该系列中的 Seed-OSS-36B-Instruct 是一个拥有 360 亿参数的指令微调模型,它原生支持超长上下文长度,使其能够一次性处理海量文档或复杂的代码库。该模型在推理、代码生成和智能体任务(如工具使用)方面进行了特别优化,同时保持了平衡且出色的通用能力。此模型的一大特色是“思考预算”(Thinking Budget)功能,允许用户根据需要灵活调整推理长度,从而在实际应用中有效提升推理效率。',
+    displayName: 'Seed OSS 36B Instruct',
+    id: 'ByteDance-Seed/Seed-OSS-36B-Instruct',
+    pricing: {
+      currency: 'CNY',
+      units: [
+        { name: 'textInput', rate: 1.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 4, strategy: 'fixed', unit: 'millionTokens' },
+      ],
+    },
+    releasedAt: '2025-08-20',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,

package/packages/model-bank/src/aiModels/volcengine.ts

@@ -1,7 +1,6 @@
 import { AIChatModelCard, AIImageModelCard } from '../types/aiModel';
 
-//
-// pricing https://console.volcengine.com/ark/region:ark+cn-beijing/openManagement
+// https://www.volcengine.com/docs/82379/1330310
 
 const doubaoChatModels: AIChatModelCard[] = [
   {
@@ -42,7 +41,6 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       'Kimi-K2 是一款Moonshot AI推出的具备超强代码和 Agent 能力的 MoE 架构基础模型,总参数 1T,激活参数 32B。在通用知识推理、编程、数学、Agent 等主要类别的基准性能测试中,K2 模型的性能超过其他主流开源模型。',
     displayName: 'Kimi K2',
-    enabled: true,
     id: 'kimi-k2',
     maxOutput: 16_384,
     pricing: {
@@ -61,7 +59,59 @@ const doubaoChatModels: AIChatModelCard[] = [
       vision: true,
     },
     config: {
-      deploymentName: 'doubao-seed-1-6-
+      deploymentName: 'doubao-seed-1-6-vision-250815',
+    },
+    contextWindowTokens: 256_000,
+    description:
+      'Doubao-Seed-1.6-vision 视觉深度思考模型,在教育、图像审核、巡检与安防和AI 搜索问答等场景下展现出更强的通用多模态理解和推理能力。支持 256k 上下文窗口,输出长度支持最大 64k tokens。',
+    displayName: 'Doubao Seed 1.6 Vision',
+    id: 'doubao-seed-1.6-vision',
+    maxOutput: 32_000,
+    pricing: {
+      currency: 'CNY',
+      units: [
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 0.8,
+              '[32_000, 128_000]': 2.4,
+              '[128_000, infinity]': 4.8,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 8,
+              '[32_000, 128_000]': 16,
+              '[128_000, infinity]': 24,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textOutput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        { name: 'textInput_cacheRead', rate: 0.16, strategy: 'fixed', unit: 'millionTokens' },
+      ],
+    },
+    settings: {
+      extendParams: ['enableReasoning'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'doubao-seed-1-6-thinking-250715',
     },
     contextWindowTokens: 256_000,
     description:
@@ -69,12 +119,37 @@ const doubaoChatModels: AIChatModelCard[] = [
     displayName: 'Doubao Seed 1.6 Thinking',
     enabled: true,
     id: 'doubao-seed-1.6-thinking',
-    maxOutput:
+    maxOutput: 32_000,
     pricing: {
       currency: 'CNY',
       units: [
-        {
-
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 0.8,
+              '[32_000, 128_000]': 1.2,
+              '[128_000, infinity]': 2.4,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 8,
+              '[32_000, 128_000]': 16,
+              '[128_000, infinity]': 24,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textOutput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        { name: 'textInput_cacheRead', rate: 0.16, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     type: 'chat',
@@ -94,12 +169,38 @@ const doubaoChatModels: AIChatModelCard[] = [
     displayName: 'Doubao Seed 1.6',
     enabled: true,
     id: 'doubao-seed-1.6',
-    maxOutput:
+    maxOutput: 32_000,
     pricing: {
       currency: 'CNY',
       units: [
-        {
-
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 0.8,
+              '[32_000, 128_000]': 1.2,
+              '[128_000, infinity]': 2.4,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]_[0, 8192]': 2,
+              '[0, 32_000]_[8192, infinity]': 8,
+              '[32_000, 128_000]_[0, infinity]': 16,
+              '[128_000, infinity]_[0, infinity]': 24,
+            },
+            pricingParams: ['textInputRange', 'textOutputRange'],
+          },
+          name: 'textOutput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        { name: 'textInput_cacheRead', rate: 0.16, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     settings: {
@@ -114,7 +215,7 @@ const doubaoChatModels: AIChatModelCard[] = [
       vision: true,
     },
     config: {
-      deploymentName: 'doubao-seed-1-6-flash-
+      deploymentName: 'doubao-seed-1-6-flash-250828',
     },
     contextWindowTokens: 256_000,
     description:
@@ -122,12 +223,37 @@ const doubaoChatModels: AIChatModelCard[] = [
     displayName: 'Doubao Seed 1.6 Flash',
     enabled: true,
     id: 'doubao-seed-1.6-flash',
-    maxOutput:
+    maxOutput: 32_000,
     pricing: {
       currency: 'CNY',
       units: [
-        {
-
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 0.15,
+              '[32_000, 128_000]': 0.3,
+              '[128_000, infinity]': 0.6,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 32_000]': 1.5,
+              '[32_000, 128_000]': 3,
+              '[128_000, infinity]': 6,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textOutput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        { name: 'textInput_cacheRead', rate: 0.03, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
     settings: {
@@ -235,7 +361,7 @@ const doubaoChatModels: AIChatModelCard[] = [
       ],
     },
     settings: {
-      extendParams: ['
+      extendParams: ['thinking'],
     },
     type: 'chat',
   },
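
In the doubao-seed-1.6 entry, the output price is looked up on two dimensions at once (`pricingParams: ['textInputRange', 'textOutputRange']`), so each key concatenates one range per dimension with an underscore, e.g. `'[0, 32_000]_[8192, infinity]'`. A minimal sketch of matching such a composite key, again purely illustrative and not taken from this package:

```ts
// Hypothetical sketch: matching a two-dimensional lookup key such as
// '[0, 32_000]_[0, 8192]' against concrete input/output token counts.
const inRange = (range: string, value: number): boolean => {
  const [min, max] = range
    .replace(/[[\]\s]/g, '')
    .split(',')
    .map((p) => (p === 'infinity' ? Infinity : Number(p.replaceAll('_', ''))));
  return value >= min && value < max;
};

// Split on the ']_[' joint; inRange strips brackets, so the halves need no repair.
const matchesKey = (key: string, values: number[]): boolean =>
  key.split(']_[').every((range, i) => inRange(range, values[i]));

const outputPrices: Record<string, number> = {
  '[0, 32_000]_[0, 8192]': 2,
  '[0, 32_000]_[8192, infinity]': 8,
  '[32_000, 128_000]_[0, infinity]': 16,
  '[128_000, infinity]_[0, infinity]': 24,
};

// 10k input tokens with a 20k-token completion lands in '[0, 32_000]_[8192, infinity]'.
const price = Object.entries(outputPrices).find(([key]) => matchesKey(key, [10_000, 20_000]))?.[1];
console.log(price); // 8
```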

package/packages/model-runtime/src/ai21/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeAi21AI } from './index';
 
 testProvider({

package/packages/model-runtime/src/ai360/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeAi360AI } from './index';
 
 testProvider({

package/packages/model-runtime/src/akashchat/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeAkashChatAI } from './index';
+
+const provider = ModelProvider.AkashChat;
+const defaultBaseURL = 'https://chatapi.akash.network/api/v1';
+
+testProvider({
+  Runtime: LobeAkashChatAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_AKASH_CHAT_COMPLETION',
+  chatModel: 'llama-3.1-8b-instruct',
+  test: {
+    skipAPICall: true,
+  },
+});

package/packages/model-runtime/src/anthropic/index.test.ts

@@ -1,8 +1,7 @@
 // @vitest-environment node
+import { ChatCompletionTool, ChatStreamPayload } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { ChatCompletionTool, ChatStreamPayload } from '@/libs/model-runtime';
-
 import * as anthropicHelpers from '../utils/anthropicHelpers';
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeAnthropicAI } from './index';

package/packages/model-runtime/src/baichuan/index.test.ts

@@ -1,8 +1,7 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { LobeOpenAICompatibleRuntime, ModelProvider } from '@/libs/model-runtime';
-
 import { testProvider } from '../providerTestUtils';
 import { LobeBaichuanAI } from './index';
 

package/packages/model-runtime/src/bedrock/index.test.ts

@@ -1,9 +1,8 @@
 // @vitest-environment node
 import { InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-runtime';
+import { AgentRuntimeErrorType, ModelProvider } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { AgentRuntimeErrorType, ModelProvider } from '@/libs/model-runtime';
-
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeBedrockAI } from './index';
 

package/packages/model-runtime/src/bfl/createImage.test.ts

@@ -1,8 +1,7 @@
 // @vitest-environment node
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { CreateImagePayload } from '
-
+import { CreateImagePayload } from '../types/image';
 import { createBflImage } from './createImage';
 import { BflStatusResponse } from './types';
 

package/packages/model-runtime/src/bfl/index.test.ts

@@ -1,8 +1,7 @@
 // @vitest-environment node
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { CreateImagePayload } from '
-
+import { CreateImagePayload } from '../types/image';
 import { LobeBflAI } from './index';
 
 // Mock the createBflImage function

package/packages/model-runtime/src/cloudflare/index.test.ts

@@ -1,8 +1,7 @@
 // @vitest-environment node
+import { ChatCompletionTool } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { ChatCompletionTool } from '@/libs/model-runtime';
-
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeCloudflareAI } from './index';
 

package/packages/model-runtime/src/cohere/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeCohereAI } from './index';
+
+const provider = ModelProvider.Cohere;
+const defaultBaseURL = 'https://api.cohere.ai/compatibility/v1';
+
+testProvider({
+  Runtime: LobeCohereAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_COHERE_CHAT_COMPLETION',
+  chatModel: 'command-r7b',
+  test: {
+    skipAPICall: true,
+  },
+});

package/packages/model-runtime/src/deepseek/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeDeepSeekAI } from './index';
 
 const provider = ModelProvider.DeepSeek;

package/packages/model-runtime/src/fireworksai/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeFireworksAI } from './index';
 
 const provider = ModelProvider.FireworksAI;

package/packages/model-runtime/src/giteeai/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeGiteeAI } from './index';
 
 testProvider({

package/packages/model-runtime/src/github/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeGithubAI } from './index';
 
 testProvider({

package/packages/model-runtime/src/google/createImage.test.ts

@@ -2,8 +2,7 @@
 import { GoogleGenAI } from '@google/genai';
 import { beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { CreateImagePayload } from '
-
+import { CreateImagePayload } from '../types/image';
 import * as imageToBase64Module from '../utils/imageToBase64';
 import { createGoogleImage } from './createImage';
 

package/packages/model-runtime/src/google/index.test.ts

@@ -1,9 +1,9 @@
 // @vitest-environment edge-runtime
 import { GenerateContentResponse, Tool } from '@google/genai';
+import { OpenAIChatMessage } from '@lobechat/model-runtime';
 import OpenAI from 'openai';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { OpenAIChatMessage } from '@/libs/model-runtime';
 import { ChatStreamPayload } from '@/types/openai/chat';
 
 import * as debugStreamModule from '../utils/debugStream';

package/packages/model-runtime/src/groq/index.test.ts

@@ -1,9 +1,8 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+import { testProvider } from '../providerTestUtils';
 import { LobeGroq } from './index';
 
 testProvider({

package/packages/model-runtime/src/huggingface/index.test.ts

@@ -0,0 +1,40 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { LobeHuggingFaceAI } from './index';
+
+describe('LobeHuggingFaceAI', () => {
+  let instance: any;
+
+  beforeEach(() => {
+    instance = new LobeHuggingFaceAI({ apiKey: 'test' });
+
+    const mockAsyncIterable = {
+      async *[Symbol.asyncIterator]() {
+        yield { choices: [] } as any;
+      },
+    } as any;
+
+    // mock custom client's chatCompletionStream
+    instance['client'] = {
+      chatCompletionStream: vi.fn().mockReturnValue(mockAsyncIterable),
+    } as any;
+  });
+
+  it('should initialize and return StreamingTextResponse on chat', async () => {
+    const res = await instance.chat({
+      messages: [{ role: 'user', content: 'hello' }],
+      model: 'meta-llama/Meta-Llama-3.1-8B-Instruct',
+      temperature: 0,
+      stream: true,
+    });
+
+    expect(res).toBeInstanceOf(Response);
+  });
+
+  it('should set provider id properly', async () => {
+    // provider id 用于错误封装等,这里验证暴露 id 一致
+    expect(ModelProvider.HuggingFace).toBe('huggingface');
+  });
+});

package/packages/model-runtime/src/hunyuan/index.test.ts

@@ -1,9 +1,8 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+import { testProvider } from '../providerTestUtils';
 import { LobeHunyuanAI } from './index';
 
 testProvider({

package/packages/model-runtime/src/internlm/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeInternLMAI } from './index';
 
 testProvider({

package/packages/model-runtime/src/jina/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeJinaAI } from './index';
+
+const provider = ModelProvider.Jina;
+const defaultBaseURL = 'https://deepsearch.jina.ai/v1';
+
+testProvider({
+  Runtime: LobeJinaAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_JINA_CHAT_COMPLETION',
+  chatModel: 'jina-embeddings-v3',
+  test: {
+    skipAPICall: true,
+  },
+});

package/packages/model-runtime/src/lmstudio/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeLMStudioAI } from './index';
 
 const provider = ModelProvider.LMStudio;

package/packages/model-runtime/src/minimax/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeMinimaxAI } from './index';
+
+const provider = ModelProvider.Minimax;
+const defaultBaseURL = 'https://api.minimax.chat/v1';
+
+testProvider({
+  Runtime: LobeMinimaxAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_MINIMAX_CHAT_COMPLETION',
+  chatModel: 'abab6.5s-chat',
+  test: {
+    skipAPICall: true,
+  },
+});