@lobehub/chat 1.117.1 → 1.118.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +8 -0
- package/AGENTS.md +133 -0
- package/CHANGELOG.md +58 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/chat.json +6 -1
- package/locales/ar/error.json +8 -8
- package/locales/ar/models.json +21 -0
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/chat.json +6 -1
- package/locales/bg-BG/error.json +8 -8
- package/locales/bg-BG/models.json +21 -0
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/chat.json +6 -1
- package/locales/de-DE/error.json +7 -7
- package/locales/de-DE/models.json +21 -0
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/chat.json +6 -1
- package/locales/en-US/error.json +4 -4
- package/locales/en-US/models.json +21 -0
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/chat.json +6 -1
- package/locales/es-ES/error.json +9 -9
- package/locales/es-ES/models.json +21 -0
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/chat.json +6 -1
- package/locales/fa-IR/error.json +9 -9
- package/locales/fa-IR/models.json +21 -0
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/chat.json +6 -1
- package/locales/fr-FR/error.json +9 -9
- package/locales/fr-FR/models.json +21 -0
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/chat.json +6 -1
- package/locales/it-IT/error.json +7 -7
- package/locales/it-IT/models.json +21 -0
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/chat.json +6 -1
- package/locales/ja-JP/error.json +8 -8
- package/locales/ja-JP/models.json +21 -0
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/chat.json +6 -1
- package/locales/ko-KR/error.json +8 -8
- package/locales/ko-KR/models.json +21 -0
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/chat.json +6 -1
- package/locales/nl-NL/error.json +8 -8
- package/locales/nl-NL/models.json +21 -0
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/chat.json +6 -1
- package/locales/pl-PL/error.json +9 -9
- package/locales/pl-PL/models.json +21 -0
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/chat.json +6 -1
- package/locales/pt-BR/error.json +8 -8
- package/locales/pt-BR/models.json +21 -0
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/chat.json +6 -1
- package/locales/ru-RU/error.json +7 -7
- package/locales/ru-RU/models.json +21 -0
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/chat.json +6 -1
- package/locales/tr-TR/error.json +9 -9
- package/locales/tr-TR/models.json +21 -0
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/chat.json +6 -1
- package/locales/vi-VN/models.json +21 -0
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/chat.json +6 -1
- package/locales/zh-CN/models.json +21 -0
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/chat.json +6 -1
- package/locales/zh-TW/error.json +4 -4
- package/locales/zh-TW/models.json +21 -0
- package/locales/zh-TW/providers.json +3 -0
- package/next.config.ts +22 -6
- package/package.json +1 -1
- package/packages/model-runtime/src/akashchat/index.ts +43 -0
- package/packages/model-runtime/src/google/index.ts +17 -3
- package/packages/model-runtime/src/index.ts +1 -0
- package/packages/model-runtime/src/openai/__snapshots__/index.test.ts.snap +1 -1
- package/packages/model-runtime/src/runtimeMap.ts +2 -0
- package/packages/model-runtime/src/types/chat.ts +4 -0
- package/packages/model-runtime/src/types/type.ts +1 -0
- package/packages/model-runtime/src/utils/modelParse.ts +14 -1
- package/packages/types/src/agent/chatConfig.ts +1 -0
- package/packages/types/src/aiModel.ts +2 -1
- package/packages/types/src/user/settings/keyVaults.ts +1 -0
- package/src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/config/aiModels/akashchat.ts +84 -0
- package/src/config/aiModels/google.ts +8 -6
- package/src/config/aiModels/index.ts +3 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/akashchat.ts +17 -0
- package/src/config/modelProviders/index.ts +3 -0
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +13 -1
- package/src/locales/default/chat.ts +4 -0
- package/src/services/chat.ts +4 -0
package/locales/zh-CN/chat.json
CHANGED
@@ -52,7 +52,11 @@
|
|
52
52
|
"thinking": {
|
53
53
|
"title": "深度思考开关"
|
54
54
|
},
|
55
|
-
"title": "模型扩展功能"
|
55
|
+
"title": "模型扩展功能",
|
56
|
+
"urlContext": {
|
57
|
+
"desc": "开启后将自动解析网页链接,以获取实际网页上下文内容",
|
58
|
+
"title": "提取网页链接内容"
|
59
|
+
}
|
56
60
|
},
|
57
61
|
"history": {
|
58
62
|
"title": "助手将只记住最后{{count}}条消息"
|
@@ -125,6 +129,7 @@
|
|
125
129
|
"inputWriteCached": "输入缓存写入",
|
126
130
|
"output": "输出",
|
127
131
|
"outputAudio": "音频输出",
|
132
|
+
"outputImage": "图像输出",
|
128
133
|
"outputText": "文本输出",
|
129
134
|
"outputTitle": "输出明细",
|
130
135
|
"reasoning": "深度思考",
|
@@ -74,6 +74,9 @@
|
|
74
74
|
"DeepSeek-V3": {
|
75
75
|
"description": "DeepSeek-V3 是一款由深度求索公司自研的MoE模型。DeepSeek-V3 多项评测成绩超越了 Qwen2.5-72B 和 Llama-3.1-405B 等其他开源模型,并在性能上和世界顶尖的闭源模型 GPT-4o 以及 Claude-3.5-Sonnet 不分伯仲。"
|
76
76
|
},
|
77
|
+
"DeepSeek-V3-1": {
|
78
|
+
"description": "DeepSeek V3.1:下一代推理模型,提升了复杂推理与链路思考能力,适合需要深入分析的任务。"
|
79
|
+
},
|
77
80
|
"DeepSeek-V3-Fast": {
|
78
81
|
"description": "模型供应商为:sophnet平台。DeepSeek V3 Fast 是 DeepSeek V3 0324 版本的高TPS极速版,满血非量化,代码与数学能力更强,响应更快!"
|
79
82
|
},
|
@@ -170,6 +173,9 @@
|
|
170
173
|
"Llama-3.2-90B-Vision-Instruct\t": {
|
171
174
|
"description": "适用于视觉理解代理应用的高级图像推理能力。"
|
172
175
|
},
|
176
|
+
"Meta-Llama-3-3-70B-Instruct": {
|
177
|
+
"description": "Llama 3.3 70B:通用性强的 Transformer 模型,适用于对话和生成任务。"
|
178
|
+
},
|
173
179
|
"Meta-Llama-3.1-405B-Instruct": {
|
174
180
|
"description": "Llama 3.1指令调优的文本模型,针对多语言对话用例进行了优化,在许多可用的开源和封闭聊天模型中,在常见行业基准上表现优异。"
|
175
181
|
},
|
@@ -188,6 +194,9 @@
|
|
188
194
|
"Meta-Llama-3.3-70B-Instruct": {
|
189
195
|
"description": "Llama 3.3 是 Llama 系列最先进的多语言开源大型语言模型,以极低成本体验媲美 405B 模型的性能。基于 Transformer 结构,并通过监督微调(SFT)和人类反馈强化学习(RLHF)提升有用性和安全性。其指令调优版本专为多语言对话优化,在多项行业基准上表现优于众多开源和封闭聊天模型。知识截止日期为 2023 年 12 月"
|
190
196
|
},
|
197
|
+
"Meta-Llama-4-Maverick-17B-128E-Instruct-FP8": {
|
198
|
+
"description": "Llama 4 Maverick:基于 Mixture-of-Experts 的大规模模型,提供高效的专家激活策略以在推理中表现优异。"
|
199
|
+
},
|
191
200
|
"MiniMax-M1": {
|
192
201
|
"description": "全新自研推理模型。全球领先:80K思维链 x 1M输入,效果比肩海外顶尖模型。"
|
193
202
|
},
|
@@ -377,6 +386,9 @@
|
|
377
386
|
"Qwen3-235B": {
|
378
387
|
"description": "Qwen3-235B-A22B,MoE(混合专家模型)模型,引入了“混合推理模式”,支持用户在“思考模式”和“非思考模式”之间无缝切换,支持119种语言和方言理解与推理,并具备强大的工具调用能力,在综合能力、代码与数学、多语言能力、知识与推理等多项基准测试中,都能与DeepSeek R1、OpenAI o1、o3-mini、Grok 3和谷歌Gemini 2.5 Pro等目前市场上的主流大模型竞争。"
|
379
388
|
},
|
389
|
+
"Qwen3-235B-A22B-Instruct-2507-FP8": {
|
390
|
+
"description": "Qwen3 235B A22B Instruct 2507:面向高级推理与对话指令优化的模型,混合专家架构以在大规模参数下保持推理效率。"
|
391
|
+
},
|
380
392
|
"Qwen3-32B": {
|
381
393
|
"description": "Qwen3-32B,稠密模型(Dense Model),引入了“混合推理模式”,支持用户在“思考模式”和“非思考模式”之间无缝切换,由于模型架构改进、训练数据增加以及更有效的训练方法,整体性能与Qwen2.5-72B表现相当。"
|
382
394
|
},
|
@@ -1364,6 +1376,12 @@
|
|
1364
1376
|
"google/gemini-2.5-flash": {
|
1365
1377
|
"description": "Gemini 2.5 Flash 是 Google 最先进的主力模型,专为高级推理、编码、数学和科学任务而设计。它包含内置的“思考”能力,使其能够提供具有更高准确性和细致上下文处理的响应。\n\n注意:此模型有两个变体:思考和非思考。输出定价根据思考能力是否激活而有显著差异。如果您选择标准变体(不带“:thinking”后缀),模型将明确避免生成思考令牌。\n\n要利用思考能力并接收思考令牌,您必须选择“:thinking”变体,这将产生更高的思考输出定价。\n\n此外,Gemini 2.5 Flash 可通过“推理最大令牌数”参数进行配置,如文档中所述 (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning)。"
|
1366
1378
|
},
|
1379
|
+
"google/gemini-2.5-flash-image-preview": {
|
1380
|
+
"description": "Gemini 2.5 Flash 实验模型,支持图像生成"
|
1381
|
+
},
|
1382
|
+
"google/gemini-2.5-flash-image-preview:free": {
|
1383
|
+
"description": "Gemini 2.5 Flash 实验模型,支持图像生成"
|
1384
|
+
},
|
1367
1385
|
"google/gemini-2.5-flash-preview": {
|
1368
1386
|
"description": "Gemini 2.5 Flash 是 Google 最先进的主力模型,专为高级推理、编码、数学和科学任务而设计。它包含内置的“思考”能力,使其能够提供具有更高准确性和细致上下文处理的响应。\n\n注意:此模型有两个变体:思考和非思考。输出定价根据思考能力是否激活而有显著差异。如果您选择标准变体(不带“:thinking”后缀),模型将明确避免生成思考令牌。\n\n要利用思考能力并接收思考令牌,您必须选择“:thinking”变体,这将产生更高的思考输出定价。\n\n此外,Gemini 2.5 Flash 可通过“推理最大令牌数”参数进行配置,如文档中所述 (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning)。"
|
1369
1387
|
},
|
@@ -1535,6 +1553,9 @@
|
|
1535
1553
|
"gpt-oss": {
|
1536
1554
|
"description": "GPT-OSS 20B 是 OpenAI 发布的开源大语言模型,采用 MXFP4 量化技术,适合在高端消费级GPU或Apple Silicon Mac上运行。该模型在对话生成、代码编写和推理任务方面表现出色,支持函数调用和工具使用。"
|
1537
1555
|
},
|
1556
|
+
"gpt-oss-120b": {
|
1557
|
+
"description": "GPT-OSS-120B MXFP4 量化的 Transformer 结构,在资源受限时仍能保持强劲性能。"
|
1558
|
+
},
|
1538
1559
|
"gpt-oss:120b": {
|
1539
1560
|
"description": "GPT-OSS 120B 是 OpenAI 发布的大型开源语言模型,采用 MXFP4 量化技术,为旗舰级模型。需要多GPU或高性能工作站环境运行,在复杂推理、代码生成和多语言处理方面具备卓越性能,支持高级函数调用和工具集成。"
|
1540
1561
|
},
|
@@ -11,6 +11,9 @@
|
|
11
11
|
"aihubmix": {
|
12
12
|
"description": "AiHubMix 通过统一的 API 接口提供对多种 AI 模型的访问。"
|
13
13
|
},
|
14
|
+
"akashchat": {
|
15
|
+
"description": "Akash 是一个无需许可的云资源市场,与传统云提供商相比,其定价具有竞争力。"
|
16
|
+
},
|
14
17
|
"anthropic": {
|
15
18
|
"description": "Anthropic 是一家专注于人工智能研究和开发的公司,提供了一系列先进的语言模型,如 Claude 3.5 Sonnet、Claude 3 Sonnet、Claude 3 Opus 和 Claude 3 Haiku。这些模型在智能、速度和成本之间取得了理想的平衡,适用于从企业级工作负载到快速响应的各种应用场景。Claude 3.5 Sonnet 作为其最新模型,在多项评估中表现优异,同时保持了较高的性价比。"
|
16
19
|
},
|
package/locales/zh-TW/chat.json
CHANGED
@@ -52,7 +52,11 @@
|
|
52
52
|
"thinking": {
|
53
53
|
"title": "深度思考開關"
|
54
54
|
},
|
55
|
-
"title": "模型擴展功能"
|
55
|
+
"title": "模型擴展功能",
|
56
|
+
"urlContext": {
|
57
|
+
"desc": "開啟後將自動解析網頁連結,以取得實際網頁上下文內容",
|
58
|
+
"title": "擷取網頁連結內容"
|
59
|
+
}
|
56
60
|
},
|
57
61
|
"history": {
|
58
62
|
"title": "助手將只記住最後{{count}}條消息"
|
@@ -125,6 +129,7 @@
|
|
125
129
|
"inputWriteCached": "輸入快取寫入",
|
126
130
|
"output": "輸出",
|
127
131
|
"outputAudio": "音頻輸出",
|
132
|
+
"outputImage": "圖像輸出",
|
128
133
|
"outputText": "文本輸出",
|
129
134
|
"outputTitle": "輸出明細",
|
130
135
|
"reasoning": "深度思考",
|
package/locales/zh-TW/error.json
CHANGED
@@ -87,11 +87,11 @@
|
|
87
87
|
"FreePlanLimit": "目前為免費用戶,無法使用該功能,請升級到付費計劃後繼續使用",
|
88
88
|
"GoogleAIBlockReason": {
|
89
89
|
"BLOCKLIST": "您的內容包含被禁止的詞彙。請檢查並修改您的輸入內容後再試。",
|
90
|
-
"IMAGE_SAFETY": "
|
91
|
-
"LANGUAGE": "
|
92
|
-
"OTHER": "
|
90
|
+
"IMAGE_SAFETY": "生成的圖像內容因安全原因被阻擋。請嘗試修改您的圖像生成請求。",
|
91
|
+
"LANGUAGE": "您使用的語言暫時不被支援。請嘗試使用英文或其他受支援的語言重新提問。",
|
92
|
+
"OTHER": "內容因未知原因被阻擋。請嘗試重新表述您的請求。",
|
93
93
|
"PROHIBITED_CONTENT": "您的請求可能包含違禁內容。請調整您的請求,確保內容符合使用規範。",
|
94
|
-
"RECITATION": "
|
94
|
+
"RECITATION": "您的內容可能涉及版權問題而被阻擋。請嘗試使用原創內容或重新表述您的請求。",
|
95
95
|
"SAFETY": "您的內容因安全政策而被阻擋。請嘗試調整您的請求內容,避免包含可能有害或不當的內容。",
|
96
96
|
"SPII": "您的內容可能包含敏感個人身分資訊。為保護隱私,請移除相關敏感資訊後再試。",
|
97
97
|
"default": "內容被阻擋:{{blockReason}}。請調整您的請求內容後再試。"
|
@@ -74,6 +74,9 @@
|
|
74
74
|
"DeepSeek-V3": {
|
75
75
|
"description": "DeepSeek-V3 是一款由深度求索公司自研的MoE模型。DeepSeek-V3 多項評測成績超越了 Qwen2.5-72B 和 Llama-3.1-405B 等其他開源模型,並在性能上和世界頂尖的閉源模型 GPT-4o 以及 Claude-3.5-Sonnet 不分伯仲。"
|
76
76
|
},
|
77
|
+
"DeepSeek-V3-1": {
|
78
|
+
"description": "DeepSeek V3.1:下一代推理模型,提升了複雜推理與鏈路思考能力,適合需要深入分析的任務。"
|
79
|
+
},
|
77
80
|
"DeepSeek-V3-Fast": {
|
78
81
|
"description": "模型供應商為:sophnet平台。DeepSeek V3 Fast 是 DeepSeek V3 0324 版本的高TPS極速版,滿血非量化,代碼與數學能力更強,響應更快!"
|
79
82
|
},
|
@@ -170,6 +173,9 @@
|
|
170
173
|
"Llama-3.2-90B-Vision-Instruct\t": {
|
171
174
|
"description": "適用於視覺理解代理應用的高級圖像推理能力。"
|
172
175
|
},
|
176
|
+
"Meta-Llama-3-3-70B-Instruct": {
|
177
|
+
"description": "Llama 3.3 70B:通用性強的 Transformer 模型,適用於對話和生成任務。"
|
178
|
+
},
|
173
179
|
"Meta-Llama-3.1-405B-Instruct": {
|
174
180
|
"description": "Llama 3.1指令調優的文本模型,針對多語言對話用例進行了優化,在許多可用的開源和封閉聊天模型中,在常見行業基準上表現優異。"
|
175
181
|
},
|
@@ -188,6 +194,9 @@
|
|
188
194
|
"Meta-Llama-3.3-70B-Instruct": {
|
189
195
|
"description": "Llama 3.3 是 Llama 系列最先進的多語言開源大型語言模型,以極低成本體驗媲美 405B 模型的性能。基於 Transformer 結構,並透過監督微調(SFT)和人類反饋強化學習(RLHF)提升有用性和安全性。其指令調優版本專為多語言對話優化,在多項行業基準上表現優於眾多開源和封閉聊天模型。知識截止日期為 2023 年 12 月"
|
190
196
|
},
|
197
|
+
"Meta-Llama-4-Maverick-17B-128E-Instruct-FP8": {
|
198
|
+
"description": "Llama 4 Maverick:基於 Mixture-of-Experts 的大規模模型,提供高效的專家啟動策略以在推理中表現優異。"
|
199
|
+
},
|
191
200
|
"MiniMax-M1": {
|
192
201
|
"description": "全新自研推理模型。全球領先:80K思維鏈 x 1M輸入,效果比肩海外頂尖模型。"
|
193
202
|
},
|
@@ -377,6 +386,9 @@
|
|
377
386
|
"Qwen3-235B": {
|
378
387
|
"description": "Qwen3-235B-A22B,MoE(混合專家模型)模型,引入了「混合推理模式」,支援用戶在「思考模式」和「非思考模式」之間無縫切換,支援119種語言和方言理解與推理,並具備強大的工具調用能力,在綜合能力、程式碼與數學、多語言能力、知識與推理等多項基準測試中,都能與DeepSeek R1、OpenAI o1、o3-mini、Grok 3和谷歌Gemini 2.5 Pro等目前市場上的主流大型模型競爭。"
|
379
388
|
},
|
389
|
+
"Qwen3-235B-A22B-Instruct-2507-FP8": {
|
390
|
+
"description": "Qwen3 235B A22B Instruct 2507:面向高級推理與對話指令優化的模型,混合專家架構以在大規模參數下保持推理效率。"
|
391
|
+
},
|
380
392
|
"Qwen3-32B": {
|
381
393
|
"description": "Qwen3-32B,稠密模型(Dense Model),引入了「混合推理模式」,支援用戶在「思考模式」和「非思考模式」之間無縫切換,由於模型架構改進、訓練資料增加以及更有效的訓練方法,整體性能與Qwen2.5-72B表現相當。"
|
382
394
|
},
|
@@ -1364,6 +1376,12 @@
|
|
1364
1376
|
"google/gemini-2.5-flash": {
|
1365
1377
|
"description": "Gemini 2.5 Flash 是 Google 最先進的主力模型,專為高階推理、編碼、數學和科學任務而設計。它包含內建的「思考」能力,使其能夠提供具有更高準確性和細緻上下文處理的回應。\n\n注意:此模型有兩個變體:思考和非思考。輸出定價根據思考能力是否啟用而有顯著差異。如果您選擇標準變體(不帶「:thinking」後綴),模型將明確避免生成思考令牌。\n\n要利用思考能力並接收思考令牌,您必須選擇「:thinking」變體,這將產生較高的思考輸出定價。\n\n此外,Gemini 2.5 Flash 可透過「推理最大令牌數」參數進行配置,如文件中所述 (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning)。"
|
1366
1378
|
},
|
1379
|
+
"google/gemini-2.5-flash-image-preview": {
|
1380
|
+
"description": "Gemini 2.5 Flash 實驗模型,支援圖像生成"
|
1381
|
+
},
|
1382
|
+
"google/gemini-2.5-flash-image-preview:free": {
|
1383
|
+
"description": "Gemini 2.5 Flash 實驗模型,支援圖像生成"
|
1384
|
+
},
|
1367
1385
|
"google/gemini-2.5-flash-preview": {
|
1368
1386
|
"description": "Gemini 2.5 Flash 是 Google 最先進的主力模型,專為高級推理、編碼、數學和科學任務而設計。它包含內建的「思考」能力,使其能夠提供具有更高準確性和細緻上下文處理的回應。\n\n注意:此模型有兩個變體:思考和非思考。輸出定價根據思考能力是否啟用而有顯著差異。如果您選擇標準變體(不帶「:thinking」後綴),模型將明確避免生成思考令牌。\n\n要利用思考能力並接收思考令牌,您必須選擇「:thinking」變體,這將產生更高的思考輸出定價。\n\n此外,Gemini 2.5 Flash 可通過「推理最大令牌數」參數進行配置,如文檔中所述 (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning)。"
|
1369
1387
|
},
|
@@ -1533,6 +1551,9 @@
|
|
1533
1551
|
"gpt-oss": {
|
1534
1552
|
"description": "GPT-OSS 20B 是 OpenAI 發布的開源大型語言模型,採用 MXFP4 量化技術,適合在高端消費級 GPU 或 Apple Silicon Mac 上運行。該模型在對話生成、程式碼撰寫和推理任務方面表現出色,支援函數呼叫和工具使用。"
|
1535
1553
|
},
|
1554
|
+
"gpt-oss-120b": {
|
1555
|
+
"description": "GPT-OSS-120B MXFP4 量化的 Transformer 結構,在資源受限時仍能保持強勁性能。"
|
1556
|
+
},
|
1536
1557
|
"gpt-oss:120b": {
|
1537
1558
|
"description": "GPT-OSS 120B 是 OpenAI 發布的大型開源語言模型,採用 MXFP4 量化技術,為旗艦級模型。需要多 GPU 或高效能工作站環境運行,在複雜推理、程式碼生成和多語言處理方面具備卓越性能,支援高級函數呼叫和工具整合。"
|
1538
1559
|
},
|
@@ -11,6 +11,9 @@
|
|
11
11
|
"aihubmix": {
|
12
12
|
"description": "AiHubMix 透過統一的 API 介面提供對多種 AI 模型的存取。"
|
13
13
|
},
|
14
|
+
"akashchat": {
|
15
|
+
"description": "Akash 是一個無需許可的雲端資源市場,與傳統雲端供應商相比,其定價具有競爭力。"
|
16
|
+
},
|
14
17
|
"anthropic": {
|
15
18
|
"description": "Anthropic 是一家專注於人工智慧研究和開發的公司,提供了一系列先進的語言模型,如 Claude 3.5 Sonnet、Claude 3 Sonnet、Claude 3 Opus 和 Claude 3 Haiku。這些模型在智能、速度和成本之間取得了理想的平衡,適用於從企業級工作負載到快速響應的各種應用場景。Claude 3.5 Sonnet 作為其最新模型,在多項評估中表現優異,同時保持了較高的性價比。"
|
16
19
|
},
|
package/next.config.ts
CHANGED
@@ -9,6 +9,7 @@ const buildWithDocker = process.env.DOCKER === 'true';
|
|
9
9
|
const isDesktop = process.env.NEXT_PUBLIC_IS_DESKTOP_APP === '1';
|
10
10
|
const enableReactScan = !!process.env.REACT_SCAN_MONITOR_API_KEY;
|
11
11
|
const isUsePglite = process.env.NEXT_PUBLIC_CLIENT_DB === 'pglite';
|
12
|
+
const shouldUseCSP = process.env.ENABLED_CSP === '1';
|
12
13
|
|
13
14
|
// if you need to proxy the api endpoint to remote server
|
14
15
|
|
@@ -41,14 +42,29 @@ const nextConfig: NextConfig = {
|
|
41
42
|
webVitalsAttribution: ['CLS', 'LCP'],
|
42
43
|
},
|
43
44
|
async headers() {
|
45
|
+
const securityHeaders = [
|
46
|
+
{
|
47
|
+
key: 'x-robots-tag',
|
48
|
+
value: 'all',
|
49
|
+
},
|
50
|
+
];
|
51
|
+
|
52
|
+
if (shouldUseCSP) {
|
53
|
+
securityHeaders.push(
|
54
|
+
{
|
55
|
+
key: 'X-Frame-Options',
|
56
|
+
value: 'DENY',
|
57
|
+
},
|
58
|
+
{
|
59
|
+
key: 'Content-Security-Policy',
|
60
|
+
value: "frame-ancestors 'none';",
|
61
|
+
},
|
62
|
+
);
|
63
|
+
}
|
64
|
+
|
44
65
|
return [
|
45
66
|
{
|
46
|
-
headers: [
|
47
|
-
{
|
48
|
-
key: 'x-robots-tag',
|
49
|
-
value: 'all',
|
50
|
-
},
|
51
|
-
],
|
67
|
+
headers: securityHeaders,
|
52
68
|
source: '/:path*',
|
53
69
|
},
|
54
70
|
{
|
package/package.json
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
{
|
2
2
|
"name": "@lobehub/chat",
|
3
|
-
"version": "1.117.1",
|
3
|
+
"version": "1.118.1",
|
4
4
|
"description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
|
5
5
|
"keywords": [
|
6
6
|
"framework",
|
@@ -0,0 +1,43 @@
|
|
1
|
+
import { ModelProvider } from '../types';
|
2
|
+
import { processMultiProviderModelList } from '../utils/modelParse';
|
3
|
+
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
|
4
|
+
|
5
|
+
export interface AkashChatModelCard {
|
6
|
+
id: string;
|
7
|
+
}
|
8
|
+
|
9
|
+
export const LobeAkashChatAI = createOpenAICompatibleRuntime({
|
10
|
+
baseURL: 'https://chatapi.akash.network/api/v1',
|
11
|
+
chatCompletion: {
|
12
|
+
handlePayload: (payload) => {
|
13
|
+
const { model, ...rest } = payload;
|
14
|
+
|
15
|
+
return {
|
16
|
+
...rest,
|
17
|
+
model,
|
18
|
+
stream: true,
|
19
|
+
} as any;
|
20
|
+
},
|
21
|
+
},
|
22
|
+
debug: {
|
23
|
+
chatCompletion: () => process.env.DEBUG_AKASH_CHAT_COMPLETION === '1',
|
24
|
+
},
|
25
|
+
models: async ({ client }) => {
|
26
|
+
try {
|
27
|
+
const modelsPage = (await client.models.list()) as any;
|
28
|
+
const rawList: any[] = modelsPage.data || [];
|
29
|
+
|
30
|
+
// Remove `created` field from each model item
|
31
|
+
const modelList: AkashChatModelCard[] = rawList.map(({ created, ...rest }) => rest);
|
32
|
+
|
33
|
+
return await processMultiProviderModelList(modelList, 'akashchat');
|
34
|
+
} catch (error) {
|
35
|
+
console.warn(
|
36
|
+
'Failed to fetch AkashChat models. Please ensure your AkashChat API key is valid:',
|
37
|
+
error,
|
38
|
+
);
|
39
|
+
return [];
|
40
|
+
}
|
41
|
+
},
|
42
|
+
provider: ModelProvider.AkashChat,
|
43
|
+
});
|
@@ -484,14 +484,28 @@ export class LobeGoogleAI implements LobeRuntimeAI {
|
|
484
484
|
tools: ChatCompletionTool[] | undefined,
|
485
485
|
payload?: ChatStreamPayload,
|
486
486
|
): GoogleFunctionCallTool[] | undefined {
|
487
|
-
|
488
|
-
|
487
|
+
const hasToolCalls = payload?.messages?.some((m) => m.tool_calls?.length);
|
488
|
+
const hasSearch = payload?.enabledSearch;
|
489
|
+
const hasUrlContext = payload?.urlContext;
|
490
|
+
const hasFunctionTools = tools && tools.length > 0;
|
491
|
+
|
492
|
+
// 如果已经有 tool_calls,优先处理 function declarations
|
493
|
+
if (hasToolCalls && hasFunctionTools) {
|
489
494
|
return this.buildFunctionDeclarations(tools);
|
490
495
|
}
|
491
|
-
|
496
|
+
|
497
|
+
// 构建并返回搜索相关工具(搜索工具不能与 FunctionCall 同时使用)
|
498
|
+
if (hasUrlContext && hasSearch) {
|
499
|
+
return [{ urlContext: {} }, { googleSearch: {} }];
|
500
|
+
}
|
501
|
+
if (hasUrlContext) {
|
502
|
+
return [{ urlContext: {} }];
|
503
|
+
}
|
504
|
+
if (hasSearch) {
|
492
505
|
return [{ googleSearch: {} }];
|
493
506
|
}
|
494
507
|
|
508
|
+
// 最后考虑 function declarations
|
495
509
|
return this.buildFunctionDeclarations(tools);
|
496
510
|
}
|
497
511
|
|
@@ -2,6 +2,7 @@ import { LobeAi21AI } from './ai21';
|
|
2
2
|
import { Lobe302AI } from './ai302';
|
3
3
|
import { LobeAi360AI } from './ai360';
|
4
4
|
import { LobeAiHubMixAI } from './aihubmix';
|
5
|
+
import { LobeAkashChatAI } from './akashchat';
|
5
6
|
import { LobeAnthropicAI } from './anthropic';
|
6
7
|
import { LobeAzureOpenAI } from './azureOpenai';
|
7
8
|
import { LobeAzureAI } from './azureai';
|
@@ -61,6 +62,7 @@ export const providerRuntimeMap = {
|
|
61
62
|
ai302: Lobe302AI,
|
62
63
|
ai360: LobeAi360AI,
|
63
64
|
aihubmix: LobeAiHubMixAI,
|
65
|
+
akashchat: LobeAkashChatAI,
|
64
66
|
anthropic: LobeAnthropicAI,
|
65
67
|
azure: LobeAzureOpenAI,
|
66
68
|
azureai: LobeAzureAI,
|
@@ -118,6 +118,14 @@ export const IMAGE_MODEL_KEYWORDS = [
|
|
118
118
|
'^V_1',
|
119
119
|
] as const;
|
120
120
|
|
121
|
+
// 嵌入模型关键词配置
|
122
|
+
export const EMBEDDING_MODEL_KEYWORDS = [
|
123
|
+
'embedding',
|
124
|
+
'embed',
|
125
|
+
'bge',
|
126
|
+
'm3e',
|
127
|
+
] as const;
|
128
|
+
|
121
129
|
/**
|
122
130
|
* 检测关键词列表是否匹配模型ID(支持多种匹配模式)
|
123
131
|
* @param modelId 模型ID(小写)
|
@@ -278,7 +286,12 @@ const processModelCard = (
|
|
278
286
|
IMAGE_MODEL_KEYWORDS.map((k) => k.toLowerCase()),
|
279
287
|
)
|
280
288
|
? 'image'
|
281
|
-
:
|
289
|
+
: isKeywordListMatch(
|
290
|
+
model.id.toLowerCase(),
|
291
|
+
EMBEDDING_MODEL_KEYWORDS.map((k) => k.toLowerCase()),
|
292
|
+
)
|
293
|
+
? 'embedding'
|
294
|
+
: 'chat');
|
282
295
|
|
283
296
|
// image model can't find parameters
|
284
297
|
if (modelType === 'image' && !model.parameters && !knownModel?.parameters) {
|
@@ -41,6 +41,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
|
|
41
41
|
ai302?: OpenAICompatibleKeyVault;
|
42
42
|
ai360?: OpenAICompatibleKeyVault;
|
43
43
|
aihubmix?: OpenAICompatibleKeyVault;
|
44
|
+
akashchat?: OpenAICompatibleKeyVault;
|
44
45
|
anthropic?: OpenAICompatibleKeyVault;
|
45
46
|
azure?: AzureOpenAIKeyVault;
|
46
47
|
azureai?: AzureOpenAIKeyVault;
|
@@ -4,6 +4,7 @@ import {
|
|
4
4
|
Ai21ProviderCard,
|
5
5
|
Ai302ProviderCard,
|
6
6
|
Ai360ProviderCard,
|
7
|
+
AkashChatProviderCard,
|
7
8
|
AnthropicProviderCard,
|
8
9
|
BaichuanProviderCard,
|
9
10
|
CohereProviderCard,
|
@@ -113,6 +114,7 @@ export const useProviderList = (): ProviderItem[] => {
|
|
113
114
|
GiteeAIProviderCard,
|
114
115
|
PPIOProviderCard,
|
115
116
|
InfiniAIProviderCard,
|
117
|
+
AkashChatProviderCard,
|
116
118
|
Ai302ProviderCard,
|
117
119
|
],
|
118
120
|
[
|
@@ -0,0 +1,84 @@
|
|
1
|
+
import { AIChatModelCard } from '@/types/aiModel';
|
2
|
+
|
3
|
+
const akashChatModels: AIChatModelCard[] = [
|
4
|
+
{
|
5
|
+
abilities: {
|
6
|
+
functionCall: true,
|
7
|
+
reasoning: true,
|
8
|
+
},
|
9
|
+
contextWindowTokens: 65_536,
|
10
|
+
description:
|
11
|
+
'DeepSeek V3.1:下一代推理模型,提升了复杂推理与链路思考能力,适合需要深入分析的任务。',
|
12
|
+
displayName: 'DeepSeek V3.1',
|
13
|
+
enabled: true,
|
14
|
+
id: 'DeepSeek-V3-1',
|
15
|
+
type: 'chat',
|
16
|
+
},
|
17
|
+
{
|
18
|
+
abilities: {
|
19
|
+
functionCall: true,
|
20
|
+
reasoning: true,
|
21
|
+
},
|
22
|
+
contextWindowTokens: 65_536,
|
23
|
+
displayName: 'DeepSeek R1 Distill Qwen 32B',
|
24
|
+
id: 'DeepSeek-R1-Distill-Qwen-32B',
|
25
|
+
type: 'chat',
|
26
|
+
},
|
27
|
+
{
|
28
|
+
abilities: {
|
29
|
+
functionCall: true,
|
30
|
+
reasoning: true,
|
31
|
+
},
|
32
|
+
contextWindowTokens: 131_072,
|
33
|
+
description: 'GPT-OSS-120B MXFP4 量化的 Transformer 结构,在资源受限时仍能保持强劲性能。',
|
34
|
+
displayName: 'GPT-OSS-120B',
|
35
|
+
enabled: true,
|
36
|
+
id: 'gpt-oss-120b',
|
37
|
+
type: 'chat',
|
38
|
+
},
|
39
|
+
{
|
40
|
+
abilities: {
|
41
|
+
functionCall: true,
|
42
|
+
},
|
43
|
+
contextWindowTokens: 262_144,
|
44
|
+
description:
|
45
|
+
'Qwen3 235B A22B Instruct 2507:面向高级推理与对话指令优化的模型,混合专家架构以在大规模参数下保持推理效率。',
|
46
|
+
displayName: 'Qwen3 235B A22B Instruct 2507',
|
47
|
+
id: 'Qwen3-235B-A22B-Instruct-2507-FP8',
|
48
|
+
type: 'chat',
|
49
|
+
},
|
50
|
+
{
|
51
|
+
abilities: {
|
52
|
+
functionCall: true,
|
53
|
+
vision: true,
|
54
|
+
},
|
55
|
+
contextWindowTokens: 131_072,
|
56
|
+
description:
|
57
|
+
'Llama 4 Maverick:基于 Mixture-of-Experts 的大规模模型,提供高效的专家激活策略以在推理中表现优异。',
|
58
|
+
displayName: 'Llama 4 Maverick (17Bx128E)',
|
59
|
+
id: 'Meta-Llama-4-Maverick-17B-128E-Instruct-FP8',
|
60
|
+
type: 'chat',
|
61
|
+
},
|
62
|
+
{
|
63
|
+
abilities: {
|
64
|
+
functionCall: true,
|
65
|
+
},
|
66
|
+
contextWindowTokens: 131_072,
|
67
|
+
description: 'Llama 3.3 70B:通用性强的 Transformer 模型,适用于对话和生成任务。',
|
68
|
+
displayName: 'Llama 3.3 70B',
|
69
|
+
id: 'Meta-Llama-3-3-70B-Instruct',
|
70
|
+
type: 'chat',
|
71
|
+
},
|
72
|
+
{
|
73
|
+
abilities: {
|
74
|
+
functionCall: true,
|
75
|
+
},
|
76
|
+
contextWindowTokens: 131_072,
|
77
|
+
displayName: 'Llama 3.1 8B',
|
78
|
+
id: 'Meta-Llama-3-1-8B-Instruct-FP8',
|
79
|
+
type: 'chat',
|
80
|
+
},
|
81
|
+
];
|
82
|
+
export const allModels = [...akashChatModels];
|
83
|
+
|
84
|
+
export default allModels;
|
@@ -42,7 +42,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
42
42
|
},
|
43
43
|
releasedAt: '2025-06-17',
|
44
44
|
settings: {
|
45
|
-
extendParams: ['thinkingBudget'],
|
45
|
+
extendParams: ['thinkingBudget', 'urlContext'],
|
46
46
|
searchImpl: 'params',
|
47
47
|
searchProvider: 'google',
|
48
48
|
},
|
@@ -86,7 +86,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
86
86
|
},
|
87
87
|
releasedAt: '2025-06-05',
|
88
88
|
settings: {
|
89
|
-
extendParams: ['thinkingBudget'],
|
89
|
+
extendParams: ['thinkingBudget', 'urlContext'],
|
90
90
|
searchImpl: 'params',
|
91
91
|
searchProvider: 'google',
|
92
92
|
},
|
@@ -157,7 +157,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
157
157
|
},
|
158
158
|
releasedAt: '2025-06-17',
|
159
159
|
settings: {
|
160
|
-
extendParams: ['thinkingBudget'],
|
160
|
+
extendParams: ['thinkingBudget', 'urlContext'],
|
161
161
|
searchImpl: 'params',
|
162
162
|
searchProvider: 'google',
|
163
163
|
},
|
@@ -184,7 +184,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
184
184
|
},
|
185
185
|
releasedAt: '2025-05-20',
|
186
186
|
settings: {
|
187
|
-
extendParams: ['thinkingBudget'],
|
187
|
+
extendParams: ['thinkingBudget', 'urlContext'],
|
188
188
|
searchImpl: 'params',
|
189
189
|
searchProvider: 'google',
|
190
190
|
},
|
@@ -233,7 +233,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
233
233
|
},
|
234
234
|
releasedAt: '2025-07-22',
|
235
235
|
settings: {
|
236
|
-
extendParams: ['thinkingBudget'],
|
236
|
+
extendParams: ['thinkingBudget', 'urlContext'],
|
237
237
|
searchImpl: 'params',
|
238
238
|
searchProvider: 'google',
|
239
239
|
},
|
@@ -261,7 +261,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
261
261
|
},
|
262
262
|
releasedAt: '2025-06-11',
|
263
263
|
settings: {
|
264
|
-
extendParams: ['thinkingBudget'],
|
264
|
+
extendParams: ['thinkingBudget', 'urlContext'],
|
265
265
|
searchImpl: 'params',
|
266
266
|
searchProvider: 'google',
|
267
267
|
},
|
@@ -288,6 +288,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
288
288
|
},
|
289
289
|
releasedAt: '2025-02-05',
|
290
290
|
settings: {
|
291
|
+
extendParams: ['urlContext'],
|
291
292
|
searchImpl: 'params',
|
292
293
|
searchProvider: 'google',
|
293
294
|
},
|
@@ -314,6 +315,7 @@ const googleChatModels: AIChatModelCard[] = [
|
|
314
315
|
},
|
315
316
|
releasedAt: '2025-02-05',
|
316
317
|
settings: {
|
318
|
+
extendParams: ['urlContext'],
|
317
319
|
searchImpl: 'params',
|
318
320
|
searchProvider: 'google',
|
319
321
|
},
|
@@ -4,6 +4,7 @@ import { default as ai21 } from './ai21';
|
|
4
4
|
import { default as ai302 } from './ai302';
|
5
5
|
import { default as ai360 } from './ai360';
|
6
6
|
import { default as aihubmix } from './aihubmix';
|
7
|
+
import { default as akashchat } from './akashchat';
|
7
8
|
import { default as anthropic } from './anthropic';
|
8
9
|
import { default as azure } from './azure';
|
9
10
|
import { default as azureai } from './azureai';
|
@@ -83,6 +84,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
|
|
83
84
|
ai302,
|
84
85
|
ai360,
|
85
86
|
aihubmix,
|
87
|
+
akashchat,
|
86
88
|
anthropic,
|
87
89
|
azure,
|
88
90
|
azureai,
|
@@ -143,6 +145,7 @@ export { default as ai21 } from './ai21';
|
|
143
145
|
export { default as ai302 } from './ai302';
|
144
146
|
export { default as ai360 } from './ai360';
|
145
147
|
export { default as aihubmix } from './aihubmix';
|
148
|
+
export { default as akashchat } from './akashchat';
|
146
149
|
export { default as anthropic } from './anthropic';
|
147
150
|
export { default as azure } from './azure';
|
148
151
|
export { default as azureai } from './azureai';
|