@lobehub/chat 1.110.1 → 1.110.2
This diff shows the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/CHANGELOG.md +34 -0
- package/Dockerfile +1 -1
- package/apps/desktop/.i18nrc.js +2 -1
- package/apps/desktop/package.json +1 -2
- package/apps/desktop/resources/locales/ar/menu.json +1 -0
- package/apps/desktop/resources/locales/bg-BG/menu.json +1 -0
- package/apps/desktop/resources/locales/de-DE/menu.json +1 -0
- package/apps/desktop/resources/locales/en-US/menu.json +1 -0
- package/apps/desktop/resources/locales/es-ES/menu.json +1 -0
- package/apps/desktop/resources/locales/fa-IR/menu.json +1 -0
- package/apps/desktop/resources/locales/fr-FR/menu.json +1 -0
- package/apps/desktop/resources/locales/it-IT/menu.json +1 -0
- package/apps/desktop/resources/locales/ja-JP/menu.json +1 -0
- package/apps/desktop/resources/locales/ko-KR/menu.json +1 -0
- package/apps/desktop/resources/locales/nl-NL/menu.json +1 -0
- package/apps/desktop/resources/locales/pl-PL/menu.json +1 -0
- package/apps/desktop/resources/locales/pt-BR/menu.json +1 -0
- package/apps/desktop/resources/locales/ru-RU/menu.json +1 -0
- package/apps/desktop/resources/locales/tr-TR/menu.json +1 -0
- package/apps/desktop/resources/locales/vi-VN/menu.json +1 -0
- package/apps/desktop/resources/locales/zh-CN/menu.json +1 -0
- package/apps/desktop/resources/locales/zh-TW/menu.json +1 -0
- package/apps/desktop/src/main/controllers/MenuCtr.ts +2 -2
- package/apps/desktop/src/main/locales/default/menu.ts +1 -0
- package/apps/desktop/src/main/menus/impls/linux.ts +9 -24
- package/apps/desktop/src/main/menus/impls/macOS.ts +9 -28
- package/apps/desktop/src/main/menus/impls/windows.ts +9 -27
- package/changelog/v1.json +12 -0
- package/locales/ar/modelProvider.json +10 -1
- package/locales/ar/models.json +19 -1
- package/locales/bg-BG/modelProvider.json +10 -1
- package/locales/bg-BG/models.json +19 -1
- package/locales/de-DE/modelProvider.json +10 -1
- package/locales/de-DE/models.json +19 -1
- package/locales/en-US/modelProvider.json +10 -1
- package/locales/en-US/models.json +19 -1
- package/locales/es-ES/modelProvider.json +10 -1
- package/locales/es-ES/models.json +19 -1
- package/locales/fa-IR/modelProvider.json +10 -1
- package/locales/fa-IR/models.json +19 -1
- package/locales/fr-FR/modelProvider.json +10 -1
- package/locales/fr-FR/models.json +19 -1
- package/locales/it-IT/modelProvider.json +10 -1
- package/locales/it-IT/models.json +19 -1
- package/locales/ja-JP/modelProvider.json +10 -1
- package/locales/ja-JP/models.json +19 -1
- package/locales/ko-KR/modelProvider.json +10 -1
- package/locales/ko-KR/models.json +19 -1
- package/locales/nl-NL/modelProvider.json +10 -1
- package/locales/nl-NL/models.json +19 -1
- package/locales/pl-PL/modelProvider.json +10 -1
- package/locales/pl-PL/models.json +19 -1
- package/locales/pt-BR/modelProvider.json +10 -1
- package/locales/pt-BR/models.json +19 -1
- package/locales/ru-RU/modelProvider.json +10 -1
- package/locales/ru-RU/models.json +19 -1
- package/locales/tr-TR/modelProvider.json +10 -1
- package/locales/tr-TR/models.json +19 -1
- package/locales/vi-VN/modelProvider.json +10 -1
- package/locales/vi-VN/models.json +19 -1
- package/locales/zh-CN/modelProvider.json +10 -1
- package/locales/zh-CN/models.json +19 -1
- package/locales/zh-TW/modelProvider.json +10 -1
- package/locales/zh-TW/models.json +19 -1
- package/package.json +1 -1
- package/packages/electron-client-ipc/src/events/menu.ts +1 -1
- package/packages/types/package.json +3 -0
- package/packages/types/src/agent/chatConfig.ts +1 -1
- package/packages/types/src/agent/index.ts +3 -4
- package/packages/types/src/discover/assistants.ts +3 -3
- package/packages/types/src/message/chat.ts +4 -4
- package/src/app/(backend)/_deprecated/createBizOpenAI/auth.ts +2 -1
- package/src/app/(backend)/_deprecated/createBizOpenAI/createAzureOpenai.ts +1 -1
- package/src/app/(backend)/_deprecated/createBizOpenAI/createOpenai.ts +1 -1
- package/src/app/(backend)/_deprecated/createBizOpenAI/index.ts +1 -1
- package/src/app/(backend)/middleware/auth/index.test.ts +1 -1
- package/src/app/(backend)/middleware/auth/index.ts +1 -1
- package/src/app/(backend)/middleware/auth/utils.ts +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.ts +2 -1
- package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +2 -1
- package/src/app/(backend)/webapi/models/[provider]/route.ts +1 -1
- package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -1
- package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +1 -1
- package/src/app/[variants]/(main)/settings/provider/features/ModelList/DisabledModels.tsx +15 -5
- package/src/app/[variants]/(main)/settings/provider/features/ModelList/EnabledModelList/index.tsx +22 -4
- package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx +99 -3
- package/src/config/modelProviders/ai302.ts +1 -0
- package/src/config/modelProviders/openai.ts +1 -0
- package/src/features/ChatInput/Desktop/InputArea/index.tsx +14 -0
- package/src/features/Conversation/Error/index.tsx +1 -1
- package/src/features/Conversation/components/ChatItem/index.tsx +18 -1
- package/src/features/Portal/Artifacts/Header.tsx +1 -1
- package/src/libs/model-runtime/RouterRuntime/createRuntime.ts +2 -2
- package/src/locales/default/modelProvider.ts +9 -0
- package/src/services/__tests__/chat.test.ts +1 -1
- package/src/services/chat.ts +1 -1
- package/src/services/electron/system.ts +3 -1
- package/src/store/chat/slices/message/action.ts +1 -1
- package/src/store/chat/slices/plugin/action.ts +1 -1
- package/src/utils/errorResponse.test.ts +1 -1
- package/src/utils/errorResponse.ts +2 -1
- package/src/utils/fetch/__tests__/parseError.test.ts +1 -2
- package/src/utils/fetch/fetchSSE.ts +2 -1
- package/src/utils/fetch/parseError.ts +1 -1
- package/tsconfig.json +1 -1
- package/.dockerignore +0 -9
- package/packages/file-loaders/src/loaders/docx/fixtures/test.docx +0 -0
- package/packages/file-loaders/src/loaders/excel/fixtures/test.xlsx +0 -0
- package/packages/file-loaders/src/loaders/pptx/fixtures/test.pptx +0 -0
- package/packages/file-loaders/test/fixtures/test.docx +0 -0
- package/packages/file-loaders/test/fixtures/test.pptx +0 -0
- /package/{packages/types/src → src/types}/next.ts +0 -0
package/locales/zh-CN/modelProvider.json
CHANGED
@@ -306,6 +306,7 @@
       "latestTime": "上次更新时间:{{time}}",
       "noLatestTime": "暂未获取列表"
     },
+    "noModelsInCategory": "该分类下暂无启用的模型",
     "resetAll": {
       "conform": "确认重置当前模型的所有修改?重置后当前模型列表将会回到默认状态",
       "success": "重置成功",
@@ -316,7 +317,15 @@
       "title": "模型列表",
       "total": "共 {{count}} 个模型可用"
     },
-    "searchNotFound": "未找到搜索结果"
+    "searchNotFound": "未找到搜索结果",
+    "tabs": {
+      "all": "全部",
+      "chat": "对话",
+      "embedding": "向量化",
+      "image": "图片",
+      "stt": "ASR",
+      "tts": "TTS"
+    }
   },
   "sortModal": {
     "success": "排序更新成功",
package/locales/zh-CN/models.json
CHANGED
@@ -74,6 +74,9 @@
   "DeepSeek-V3": {
     "description": "DeepSeek-V3 是一款由深度求索公司自研的MoE模型。DeepSeek-V3 多项评测成绩超越了 Qwen2.5-72B 和 Llama-3.1-405B 等其他开源模型,并在性能上和世界顶尖的闭源模型 GPT-4o 以及 Claude-3.5-Sonnet 不分伯仲。"
   },
+  "DeepSeek-V3-Fast": {
+    "description": "模型供应商为:sophnet平台。DeepSeek V3 Fast 是 DeepSeek V3 0324 版本的高TPS极速版,满血非量化,代码与数学能力更强,响应更快!"
+  },
   "Doubao-lite-128k": {
     "description": "Doubao-lite 拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持128k上下文窗口的推理和精调。"
   },
@@ -608,6 +611,9 @@
   "aya:35b": {
     "description": "Aya 23 是 Cohere 推出的多语言模型,支持 23 种语言,为多元化语言应用提供便利。"
   },
+  "azure-DeepSeek-R1-0528": {
+    "description": "由微软部署提供; DeepSeek R1型号已进行小版本升级,当前版本为DeepSeek-R1-0528。在最新的更新中,DeepSeek R1通过增加计算资源和引入后训练阶段的算法优化机制,大幅提升了推理深度和推断能力。该模型在数学、编程和通用逻辑等多个基准测试中表现出色,其整体性能已接近领先模型,如O3和Gemini 2.5 Pro 。"
+  },
   "baichuan/baichuan2-13b-chat": {
     "description": "Baichuan-13B 百川智能开发的包含 130 亿参数的开源可商用的大规模语言模型,在权威的中文和英文 benchmark 上均取得同尺寸最好的效果"
   },
@@ -668,6 +674,9 @@
   "claude-3-sonnet-20240229": {
     "description": "Claude 3 Sonnet 在智能和速度方面为企业工作负载提供了理想的平衡。它以更低的价格提供最大效用,可靠且适合大规模部署。"
   },
+  "claude-opus-4-1-20250805": {
+    "description": "Claude Opus 4.1 是 Anthropic 最新的用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。"
+  },
   "claude-opus-4-20250514": {
     "description": "Claude Opus 4 是 Anthropic 用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。"
   },
@@ -2123,6 +2132,12 @@
   "openai/gpt-4o-mini": {
     "description": "GPT-4o mini是OpenAI在GPT-4 Omni之后推出的最新模型,支持图文输入并输出文本。作为他们最先进的小型模型,它比其他近期的前沿模型便宜很多,并且比GPT-3.5 Turbo便宜超过60%。它保持了最先进的智能,同时具有显著的性价比。GPT-4o mini在MMLU测试中获得了 82% 的得分,目前在聊天偏好上排名高于 GPT-4。"
   },
+  "openai/gpt-oss-120b": {
+    "description": "OpenAI GPT-OSS 120B 是一款拥有 1200 亿参数的顶尖语言模型,内置浏览器搜索和代码执行功能,并具备推理能力。"
+  },
+  "openai/gpt-oss-20b": {
+    "description": "OpenAI GPT-OSS 20B 是一款拥有 200 亿参数的顶尖语言模型,内置浏览器搜索和代码执行功能,并具备推理能力。"
+  },
   "openai/o1": {
     "description": "o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。"
   },
@@ -2411,8 +2426,11 @@
   "qwen3-coder-480b-a35b-instruct": {
     "description": "通义千问代码模型开源版。最新的 qwen3-coder-480b-a35b-instruct 是基于 Qwen3 的代码生成模型,具有强大的Coding Agent能力,擅长工具调用和环境交互,能够实现自主编程、代码能力卓越的同时兼具通用能力。"
   },
+  "qwen3-coder-flash": {
+    "description": "通义千问代码模型。最新的 Qwen3-Coder 系列模型是基于 Qwen3 的代码生成模型,具有强大的Coding Agent能力,擅长工具调用和环境交互,能够实现自主编程,代码能力卓越的同时兼具通用能力。"
+  },
   "qwen3-coder-plus": {
-    "description": "通义千问代码模型。最新的 Qwen3-Coder
+    "description": "通义千问代码模型。最新的 Qwen3-Coder 系列模型是基于 Qwen3 的代码生成模型,具有强大的Coding Agent能力,擅长工具调用和环境交互,能够实现自主编程,代码能力卓越的同时兼具通用能力。"
   },
   "qwq": {
     "description": "QwQ 是 Qwen 系列的推理模型。与传统的指令调优模型相比,QwQ 具备思考和推理的能力,能够在下游任务中,尤其是困难问题上,显著提升性能。QwQ-32B 是中型推理模型,能够在与最先进的推理模型(如 DeepSeek-R1、o1-mini)竞争时取得可观的表现。"
package/locales/zh-TW/modelProvider.json
CHANGED
@@ -306,6 +306,7 @@
       "latestTime": "上次更新時間:{{time}}",
       "noLatestTime": "尚未取得列表"
     },
+    "noModelsInCategory": "該分類下暫無啟用的模型",
     "resetAll": {
       "conform": "確認重置當前模型的所有修改?重置後當前模型列表將會回到預設狀態",
       "success": "重置成功",
@@ -316,7 +317,15 @@
       "title": "模型列表",
       "total": "共 {{count}} 個模型可用"
     },
-    "searchNotFound": "未找到搜尋結果"
+    "searchNotFound": "未找到搜尋結果",
+    "tabs": {
+      "all": "全部",
+      "chat": "對話",
+      "embedding": "向量化",
+      "image": "圖片",
+      "stt": "ASR",
+      "tts": "TTS"
+    }
   },
   "sortModal": {
     "success": "排序更新成功",
package/locales/zh-TW/models.json
CHANGED
@@ -74,6 +74,9 @@
   "DeepSeek-V3": {
     "description": "DeepSeek-V3 是一款由深度求索公司自研的MoE模型。DeepSeek-V3 多項評測成績超越了 Qwen2.5-72B 和 Llama-3.1-405B 等其他開源模型,並在性能上和世界頂尖的閉源模型 GPT-4o 以及 Claude-3.5-Sonnet 不分伯仲。"
   },
+  "DeepSeek-V3-Fast": {
+    "description": "模型供應商為:sophnet平台。DeepSeek V3 Fast 是 DeepSeek V3 0324 版本的高TPS極速版,滿血非量化,代碼與數學能力更強,響應更快!"
+  },
   "Doubao-lite-128k": {
     "description": "Doubao-lite 擁有極致的響應速度,更好的性價比,為客戶不同場景提供更靈活的選擇。支持128k上下文視窗的推理和精調。"
   },
@@ -608,6 +611,9 @@
   "aya:35b": {
     "description": "Aya 23 是 Cohere 推出的多語言模型,支持 23 種語言,為多元化語言應用提供便利。"
   },
+  "azure-DeepSeek-R1-0528": {
+    "description": "由微軟部署提供; DeepSeek R1型號已進行小版本升級,當前版本為DeepSeek-R1-0528。在最新的更新中,DeepSeek R1透過增加計算資源和引入後訓練階段的演算法優化機制,大幅提升了推理深度和推斷能力。該模型在數學、程式設計和通用邏輯等多個基準測試中表現出色,其整體性能已接近領先模型,如O3和Gemini 2.5 Pro 。"
+  },
   "baichuan/baichuan2-13b-chat": {
     "description": "Baichuan-13B百川智能開發的包含130億參數的開源可商用的大規模語言模型,在權威的中文和英文benchmark上均取得同尺寸最好的效果。"
   },
@@ -668,6 +674,9 @@
   "claude-3-sonnet-20240229": {
     "description": "Claude 3 Sonnet 在智能和速度方面為企業工作負載提供了理想的平衡。它以更低的價格提供最大效用,可靠且適合大規模部署。"
   },
+  "claude-opus-4-1-20250805": {
+    "description": "Claude Opus 4.1 是 Anthropic 最新的用於處理高度複雜任務的最強大模型。它在性能、智慧、流暢性和理解力方面表現卓越。"
+  },
   "claude-opus-4-20250514": {
     "description": "Claude Opus 4 是 Anthropic 用於處理高度複雜任務的最強大模型。它在性能、智能、流暢性和理解力方面表現卓越。"
   },
@@ -2123,6 +2132,12 @@
   "openai/gpt-4o-mini": {
     "description": "GPT-4o mini是OpenAI在GPT-4 Omni之後推出的最新模型,支持圖文輸入並輸出文本。作為他們最先進的小型模型,它比其他近期的前沿模型便宜很多,並且比GPT-3.5 Turbo便宜超過60%。它保持了最先進的智能,同時具有顯著的性價比。GPT-4o mini在MMLU測試中獲得了82%的得分,目前在聊天偏好上排名高於GPT-4。"
   },
+  "openai/gpt-oss-120b": {
+    "description": "OpenAI GPT-OSS 120B 是一款擁有 1200 億參數的頂尖語言模型,內建瀏覽器搜尋和程式碼執行功能,並具備推理能力。"
+  },
+  "openai/gpt-oss-20b": {
+    "description": "OpenAI GPT-OSS 20B 是一款擁有 200 億參數的頂尖語言模型,內建瀏覽器搜尋和程式碼執行功能,並具備推理能力。"
+  },
   "openai/o1": {
     "description": "o1 是 OpenAI 新的推理模型,支援圖文輸入並輸出文本,適用於需要廣泛通用知識的複雜任務。該模型具有 200K 上下文和 2023 年 10 月的知識截止日期。"
   },
@@ -2411,8 +2426,11 @@
   "qwen3-coder-480b-a35b-instruct": {
     "description": "通義千問程式碼模型開源版。最新的 qwen3-coder-480b-a35b-instruct 是基於 Qwen3 的程式碼生成模型,具有強大的 Coding Agent 能力,擅長工具調用和環境互動,能夠實現自主程式設計、程式碼能力卓越的同時兼具通用能力。"
   },
+  "qwen3-coder-flash": {
+    "description": "通義千問程式碼模型。最新的 Qwen3-Coder 系列模型是基於 Qwen3 的程式碼生成模型,具有強大的Coding Agent能力,擅長工具調用和環境互動,能夠實現自主程式設計,程式碼能力卓越的同時兼具通用能力。"
+  },
   "qwen3-coder-plus": {
-    "description": "通義千問程式碼模型。最新的 Qwen3-Coder
+    "description": "通義千問程式碼模型。最新的 Qwen3-Coder 系列模型是基於 Qwen3 的程式碼生成模型,具有強大的Coding Agent能力,擅長工具調用和環境互動,能夠實現自主程式設計,程式碼能力卓越的同時兼具通用能力。"
   },
   "qwq": {
     "description": "QwQ 是一個實驗研究模型,專注於提高 AI 推理能力。"
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.110.1",
+  "version": "1.110.2",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/packages/electron-client-ipc/src/events/menu.ts
CHANGED
@@ -1,5 +1,5 @@
 export interface MenuDispatchEvents {
   refreshAppMenu: () => { success: boolean };
   setDevMenuVisibility: (visible: boolean) => { success: boolean };
-  showContextMenu: (
+  showContextMenu: (params: { data?: any; type: string }) => { success: boolean };
 }
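The `showContextMenu` event now takes a single `{ data?, type }` params object. A minimal sketch of a typed call site, assuming a generic `dispatch` helper over `MenuDispatchEvents` and the import path shown (both hypothetical, for illustration only):

```ts
// Hypothetical import path; the interface lives in packages/electron-client-ipc.
import type { MenuDispatchEvents } from '@lobechat/electron-client-ipc';

// Assumed generic dispatcher; the real IPC plumbing is provided by the package.
declare const dispatch: <T extends keyof MenuDispatchEvents>(
  event: T,
  ...args: Parameters<MenuDispatchEvents[T]>
) => ReturnType<MenuDispatchEvents[T]>;

// Callers now pass one params object: `type` discriminates which menu to show,
// `data` is an optional payload for the handler.
const { success } = dispatch('showContextMenu', { data: { messageId: 'msg-1' }, type: 'message' });
```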
package/packages/types/src/agent/index.ts
CHANGED
@@ -1,7 +1,6 @@
-import { FileItem } from '
-import { KnowledgeBaseItem } from '
-import { FewShots, LLMParams } from '
-
+import { FileItem } from '../files';
+import { KnowledgeBaseItem } from '../knowledgeBase';
+import { FewShots, LLMParams } from '../llm';
 import { LobeAgentChatConfig } from './chatConfig';
 
 export type TTSServer = 'openai' | 'edge' | 'microsoft';
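Within `packages/types`, sibling modules are now imported with relative paths rather than the app's `@/types/*` alias, presumably so the package resolves without the app's tsconfig path mapping. App code switches to the package's subpath exports instead, as the backend hunks below show. The consumption side, in brief:

```ts
// Before (app alias, resolved by the app's tsconfig):
// import { ChatErrorType } from '@/types/fetch';

// After (workspace package subpath, as used in the backend files below):
import { ChatErrorType } from '@lobechat/types/fetch';

console.log(ChatErrorType);
```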
package/packages/types/src/discover/assistants.ts
CHANGED
@@ -1,6 +1,6 @@
-import { FewShots } from '
-import { MetaData } from '
-import { LobeAgentSettings } from '
+import { FewShots } from '../llm';
+import { MetaData } from '../meta';
+import { LobeAgentSettings } from '../session';
 
 export enum AssistantCategory {
   Academic = 'academic',
package/packages/types/src/message/chat.ts
CHANGED
@@ -1,11 +1,11 @@
 import { IPluginErrorType } from '@lobehub/chat-plugin-sdk';
 
 import { ILobeAgentRuntimeErrorType } from '@/libs/model-runtime';
-import { ErrorType } from '@/types/fetch';
-import { MetaData } from '@/types/meta';
-import { MessageSemanticSearchChunk } from '@/types/rag';
-import { GroundingSearch } from '@/types/search';
 
+import { ErrorType } from '../fetch';
+import { MetaData } from '../meta';
+import { MessageSemanticSearchChunk } from '../rag';
+import { GroundingSearch } from '../search';
 import { MessageMetadata, MessageRoleType, ModelReasoning } from './base';
 import { ChatImageItem } from './image';
 import { ChatPluginPayload, ChatToolPayload } from './tools';
package/src/app/(backend)/_deprecated/createBizOpenAI/createAzureOpenai.ts
CHANGED
@@ -1,8 +1,8 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
 import OpenAI, { ClientOptions } from 'openai';
 import urlJoin from 'url-join';
 
 import { getLLMConfig } from '@/config/llm';
-import { ChatErrorType } from '@/types/fetch';
 
 // create Azure OpenAI Instance
 export const createAzureOpenai = (params: {
package/src/app/(backend)/_deprecated/createBizOpenAI/createOpenai.ts
CHANGED
@@ -1,7 +1,7 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
 import OpenAI from 'openai';
 
 import { getLLMConfig } from '@/config/llm';
-import { ChatErrorType } from '@/types/fetch';
 
 // create OpenAI instance
 export const createOpenai = (userApiKey: string | null, endpoint?: string | null) => {
package/src/app/(backend)/_deprecated/createBizOpenAI/index.ts
CHANGED
@@ -1,7 +1,7 @@
+import { ChatErrorType, ErrorType } from '@lobechat/types/fetch';
 import OpenAI from 'openai';
 
 import { getOpenAIAuthFromRequest } from '@/const/fetch';
-import { ChatErrorType, ErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
 import { checkAuth } from './auth';
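The remaining backend hunks below repeat this exact one-line swap: `ChatErrorType` (and, where used, `ErrorType`) now come from `@lobechat/types/fetch`, and call sites are untouched. A hedged sketch of the unchanged usage pattern, assuming `createErrorResponse` keeps an `(errorType, body?)` shape and that `ChatErrorType` exposes an `InvalidAccessCode` member:

```ts
import { ChatErrorType } from '@lobechat/types/fetch';

import { createErrorResponse } from '@/utils/errorResponse';

// Only the import source changed; a guard like this compiles exactly as before.
export const rejectInvalidAccessCode = (): Response =>
  createErrorResponse(ChatErrorType.InvalidAccessCode, { error: 'invalid access code' });
```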
package/src/app/(backend)/middleware/auth/index.test.ts
CHANGED
@@ -1,7 +1,7 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { AgentRuntimeError } from '@/libs/model-runtime';
-import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 import { getXorPayload } from '@/utils/server/xor';
 
package/src/app/(backend)/middleware/auth/index.ts
CHANGED
@@ -1,4 +1,5 @@
 import { AuthObject } from '@clerk/backend';
+import { ChatErrorType } from '@lobechat/types/fetch';
 import { NextRequest } from 'next/server';
 
 import {
@@ -11,7 +12,6 @@ import {
 import { ClerkAuth } from '@/libs/clerk-auth';
 import { AgentRuntimeError, ChatCompletionErrorPayload, ModelRuntime } from '@/libs/model-runtime';
 import { validateOIDCJWT } from '@/libs/oidc-provider/jwt';
-import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 import { getXorPayload } from '@/utils/server/xor';
 
package/src/app/(backend)/middleware/auth/utils.ts
CHANGED
@@ -1,9 +1,9 @@
 import { type AuthObject } from '@clerk/backend';
+import { ChatErrorType } from '@lobechat/types/fetch';
 
 import { enableClerk, enableNextAuth } from '@/const/auth';
 import { getAppConfig } from '@/envs/app';
 import { AgentRuntimeError } from '@/libs/model-runtime';
-import { ChatErrorType } from '@/types/fetch';
 
 interface CheckAuthParams {
   accessCode?: string;
package/src/app/(backend)/webapi/chat/[provider]/route.test.ts
CHANGED
@@ -1,11 +1,11 @@
 // @vitest-environment node
 import { getAuth } from '@clerk/nextjs/server';
+import { ChatErrorType } from '@lobechat/types/fetch';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { checkAuthMethod } from '@/app/(backend)/middleware/auth/utils';
 import { LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED } from '@/const/auth';
 import { LobeRuntimeAI, ModelRuntime } from '@/libs/model-runtime';
-import { ChatErrorType } from '@/types/fetch';
 import { getXorPayload } from '@/utils/server/xor';
 
 import { POST } from './route';
package/src/app/(backend)/webapi/chat/[provider]/route.ts
CHANGED
@@ -1,3 +1,5 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
+
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import {
   AGENT_RUNTIME_ERROR_SET,
@@ -5,7 +7,6 @@ import {
   ModelRuntime,
 } from '@/libs/model-runtime';
 import { createTraceOptions, initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
-import { ChatErrorType } from '@/types/fetch';
 import { ChatStreamPayload } from '@/types/openai/chat';
 import { createErrorResponse } from '@/utils/errorResponse';
 import { getTracePayload } from '@/utils/trace';
package/src/app/(backend)/webapi/models/[provider]/pull/route.ts
CHANGED
@@ -1,7 +1,8 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
+
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import { ChatCompletionErrorPayload, PullModelParams } from '@/libs/model-runtime';
 import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
-import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
 export const runtime = 'edge';
package/src/app/(backend)/webapi/models/[provider]/route.ts
CHANGED
@@ -1,9 +1,9 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
 import { NextResponse } from 'next/server';
 
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import { ChatCompletionErrorPayload, ModelProvider } from '@/libs/model-runtime';
 import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
-import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
 export const runtime = 'edge';
package/src/app/(backend)/webapi/plugin/gateway/route.ts
CHANGED
@@ -1,3 +1,4 @@
+import { ChatErrorType, ErrorType } from '@lobechat/types/fetch';
 import { PluginRequestPayload } from '@lobehub/chat-plugin-sdk';
 import { createGatewayOnEdgeRuntime } from '@lobehub/chat-plugins-gateway';
 
@@ -6,7 +7,6 @@ import { LOBE_CHAT_TRACE_ID, TraceNameMap } from '@/const/trace';
 import { getAppConfig } from '@/envs/app';
 import { AgentRuntimeError } from '@/libs/model-runtime';
 import { TraceClient } from '@/libs/traces';
-import { ChatErrorType, ErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 import { getXorPayload } from '@/utils/server/xor';
 import { getTracePayload } from '@/utils/trace';
package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts
CHANGED
@@ -1,10 +1,10 @@
+import { ChatErrorType } from '@lobechat/types/fetch';
 import { NextResponse } from 'next/server';
 
 import { checkAuth } from '@/app/(backend)/middleware/auth';
 import { ChatCompletionErrorPayload } from '@/libs/model-runtime';
 import { TextToImagePayload } from '@/libs/model-runtime/types';
 import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
-import { ChatErrorType } from '@/types/fetch';
 import { createErrorResponse } from '@/utils/errorResponse';
 
 export const runtime = 'edge';
package/src/app/[variants]/(main)/settings/provider/features/ModelList/DisabledModels.tsx
CHANGED
@@ -1,7 +1,7 @@
 import { Button, Text } from '@lobehub/ui';
 import isEqual from 'fast-deep-equal';
 import { ChevronDown } from 'lucide-react';
-import { memo, useState } from 'react';
+import { memo, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
@@ -10,16 +10,26 @@ import { aiModelSelectors } from '@/store/aiInfra/selectors';
 
 import ModelItem from './ModelItem';
 
-const DisabledModels = memo(() => {
+interface DisabledModelsProps {
+  activeTab: string;
+}
+
+const DisabledModels = memo<DisabledModelsProps>(({ activeTab }) => {
   const { t } = useTranslation('modelProvider');
 
   const [showMore, setShowMore] = useState(false);
   const disabledModels = useAiInfraStore(aiModelSelectors.disabledAiProviderModelList, isEqual);
 
-
+  // Filter models based on active tab
+  const filteredDisabledModels = useMemo(() => {
+    if (activeTab === 'all') return disabledModels;
+    return disabledModels.filter((model) => model.type === activeTab);
+  }, [disabledModels, activeTab]);
+
+  const displayModels = showMore ? filteredDisabledModels : filteredDisabledModels.slice(0, 10);
 
   return (
-
+    filteredDisabledModels.length > 0 && (
       <Flexbox>
         <Text style={{ fontSize: 12, marginTop: 8 }} type={'secondary'}>
           {t('providerModels.list.disabled')}
@@ -27,7 +37,7 @@ const DisabledModels = memo(() => {
         {displayModels.map((item) => (
           <ModelItem {...item} key={item.id} />
         ))}
-        {!showMore &&
+        {!showMore && filteredDisabledModels.length > 10 && (
           <Button
             block
             icon={ChevronDown}
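Both `DisabledModels` above and `EnabledModelList` below apply the same tab filter. Extracted as a pure function for clarity (the reduced `ModelEntry` shape is illustrative; the store's model items carry more fields):

```ts
type ModelType = 'chat' | 'embedding' | 'image' | 'stt' | 'tts';

interface ModelEntry {
  id: string;
  type?: ModelType;
}

// 'all' keeps everything; any other tab keeps only models whose type matches.
const filterByTab = (models: ModelEntry[], activeTab: string): ModelEntry[] =>
  activeTab === 'all' ? models : models.filter((model) => model.type === activeTab);

// filterByTab([{ id: 'a', type: 'chat' }, { id: 'b', type: 'tts' }], 'tts')
//   -> [{ id: 'b', type: 'tts' }]
```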
package/src/app/[variants]/(main)/settings/provider/features/ModelList/EnabledModelList/index.tsx
CHANGED
@@ -1,7 +1,7 @@
 import { ActionIcon, Text } from '@lobehub/ui';
 import isEqual from 'fast-deep-equal';
 import { ArrowDownUpIcon, ToggleLeft } from 'lucide-react';
-import { useState } from 'react';
+import { useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Center, Flexbox } from 'react-layout-kit';
 
@@ -11,7 +11,11 @@ import { aiModelSelectors } from '@/store/aiInfra/selectors';
 import ModelItem from '../ModelItem';
 import SortModelModal from '../SortModelModal';
 
-const EnabledModelList = () => {
+interface EnabledModelListProps {
+  activeTab: string;
+}
+
+const EnabledModelList = ({ activeTab }: EnabledModelListProps) => {
   const { t } = useTranslation('modelProvider');
 
   const enabledModels = useAiInfraStore(aiModelSelectors.enabledAiProviderModelList, isEqual);
@@ -20,6 +24,14 @@ const EnabledModelList = () => {
   const [batchLoading, setBatchLoading] = useState(false);
 
   const isEmpty = enabledModels.length === 0;
+
+  // Filter models based on active tab
+  const filteredModels = useMemo(() => {
+    if (activeTab === 'all') return enabledModels;
+    return enabledModels.filter((model) => model.type === activeTab);
+  }, [enabledModels, activeTab]);
+
+  const isCurrentTabEmpty = filteredModels.length === 0;
   return (
     <>
       <Flexbox horizontal justify={'space-between'}>
@@ -63,17 +75,23 @@ const EnabledModelList = () => {
         />
       )}
       </Flexbox>
+
       {isEmpty ? (
         <Center padding={12}>
           <Text style={{ fontSize: 12 }} type={'secondary'}>
             {t('providerModels.list.enabledEmpty')}
           </Text>
         </Center>
+      ) : isCurrentTabEmpty ? (
+        <Center padding={12}>
+          <Text style={{ fontSize: 12 }} type={'secondary'}>
+            {t('providerModels.list.noModelsInCategory')}
+          </Text>
+        </Center>
       ) : (
         <Flexbox gap={2}>
-          {
+          {filteredModels.map(({ displayName, id, ...res }) => {
             const label = displayName || id;
-
             return <ModelItem displayName={label as string} id={id as string} key={id} {...res} />;
           })}
         </Flexbox>
package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx
CHANGED
@@ -1,7 +1,18 @@
 'use client';
 
+import { Icon, Tabs } from '@lobehub/ui';
 import { useTheme } from 'antd-style';
-import 
+import isEqual from 'fast-deep-equal';
+import {
+  AudioLines,
+  BoltIcon,
+  Grid3x3Icon,
+  ImageIcon,
+  MessageSquareTextIcon,
+  MicIcon,
+} from 'lucide-react';
+import { Suspense, memo, useMemo, useState } from 'react';
+import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
 import { useIsMobile } from '@/hooks/useIsMobile';
@@ -20,14 +31,92 @@ interface ContentProps {
 }
 
 const Content = memo<ContentProps>(({ id }) => {
+  const { t } = useTranslation('modelProvider');
+  const [activeTab, setActiveTab] = useState('all');
+
   const [isSearching, isEmpty, useFetchAiProviderModels] = useAiInfraStore((s) => [
     !!s.modelSearchKeyword,
     aiModelSelectors.isEmptyAiProviderModelList(s),
     s.useFetchAiProviderModels,
   ]);
 
+  const allModels = useAiInfraStore(aiModelSelectors.filteredAiProviderModelList, isEqual);
+
   const { isLoading } = useFetchAiProviderModels(id);
 
+  // Count models by type (for all models, not just enabled)
+  const modelCounts = useMemo(() => {
+    const counts = {
+      all: allModels.length,
+      chat: 0,
+      embedding: 0,
+      image: 0,
+      stt: 0,
+      tts: 0,
+    };
+
+    allModels.forEach((model) => {
+      const type = model.type;
+      if (type && Object.prototype.hasOwnProperty.call(counts, type)) {
+        counts[type as keyof typeof counts]++;
+      }
+    });
+
+    return counts;
+  }, [allModels]);
+
+  // Tab definitions with counts (only show tabs with models > 0, except 'all' tab)
+  const tabs = useMemo(() => {
+    const formatTabLabel = (baseLabel: string, count: number) =>
+      count > 0 ? `${baseLabel} (${count})` : baseLabel;
+
+    const allTabs = [
+      {
+        count: modelCounts.all,
+        icon: <Icon icon={Grid3x3Icon} size={16} />,
+        key: 'all',
+        label: formatTabLabel(t('providerModels.tabs.all'), modelCounts.all),
+      },
+      {
+        count: modelCounts.chat,
+        icon: <Icon icon={MessageSquareTextIcon} size={16} />,
+        key: 'chat',
+        label: formatTabLabel(t('providerModels.tabs.chat'), modelCounts.chat),
+      },
+      {
+        count: modelCounts.image,
+        icon: <Icon icon={ImageIcon} size={16} />,
+        key: 'image',
+        label: formatTabLabel(t('providerModels.tabs.image'), modelCounts.image),
+      },
+      {
+        count: modelCounts.embedding,
+        icon: <Icon icon={BoltIcon} size={16} />,
+        key: 'embedding',
+        label: formatTabLabel(t('providerModels.tabs.embedding'), modelCounts.embedding),
+      },
+      {
+        count: modelCounts.stt,
+        icon: <Icon icon={MicIcon} size={16} />,
+        key: 'stt',
+        label: formatTabLabel(t('providerModels.tabs.stt'), modelCounts.stt),
+      },
+      {
+        count: modelCounts.tts,
+        icon: <Icon icon={AudioLines} size={16} />,
+        key: 'tts',
+        label: formatTabLabel(t('providerModels.tabs.tts'), modelCounts.tts),
+      },
+    ];
+
+    // Only show tabs that have models (count > 0), but always show 'all' tab
+    return allTabs.filter((tab) => tab.key === 'all' || tab.count > 0);
+  }, [modelCounts]);
+
+  // Ensure active tab is available, fallback to 'all' if current tab is hidden
+  const availableTabKeys = tabs.map((tab) => tab.key);
+  const currentActiveTab = availableTabKeys.includes(activeTab) ? activeTab : 'all';
+
   if (isLoading) return <SkeletonList />;
 
   if (isSearching) return <SearchResult />;
@@ -36,8 +125,15 @@ const Content = memo<ContentProps>(({ id }) => {
     <EmptyModels provider={id} />
   ) : (
     <Flexbox>
-      <
-
+      <Tabs
+        activeKey={currentActiveTab}
+        items={tabs}
+        onChange={setActiveTab}
+        size="small"
+        style={{ marginBottom: 12 }}
+      />
+      <EnabledModelList activeTab={currentActiveTab} />
+      <DisabledModels activeTab={currentActiveTab} />
     </Flexbox>
   );
 });
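The counting and tab-visibility rules above, restated as pure functions for clarity (names and the reduced model shape are illustrative, not the component's exports):

```ts
type ModelType = 'chat' | 'embedding' | 'image' | 'stt' | 'tts';

interface ModelEntry {
  id: string;
  type?: ModelType;
}

// Mirror of the component's modelCounts memo: 'all' is the total, the rest
// count only models of that exact type.
const countByType = (models: ModelEntry[]) => {
  const counts = { all: models.length, chat: 0, embedding: 0, image: 0, stt: 0, tts: 0 };
  for (const { type } of models) {
    if (type) counts[type]++;
  }
  return counts;
};

// A tab is rendered only when it has models; 'all' is always rendered, and a
// hidden tab falls back to 'all' as the active key.
const visibleTabs = (counts: ReturnType<typeof countByType>) =>
  (Object.keys(counts) as (keyof typeof counts)[]).filter(
    (key) => key === 'all' || counts[key] > 0,
  );

// countByType([{ id: 'a', type: 'chat' }, { id: 'b', type: 'tts' }])
//   -> { all: 2, chat: 1, embedding: 0, image: 0, stt: 0, tts: 1 }
// visibleTabs(countByType([{ id: 'a', type: 'chat' }])) -> ['all', 'chat']
```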