@lobehub/lobehub 2.0.0-next.321 → 2.0.0-next.323
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +80 -0
- package/apps/desktop/src/main/core/infrastructure/UpdaterManager.ts +9 -76
- package/apps/desktop/src/main/core/infrastructure/__tests__/UpdaterManager.test.ts +0 -1
- package/apps/desktop/src/main/modules/updater/configs.ts +0 -4
- package/changelog/v1.json +20 -0
- package/e2e/src/mocks/llm/index.ts +3 -3
- package/locales/ar/common.json +5 -0
- package/locales/ar/error.json +10 -1
- package/locales/bg-BG/common.json +5 -0
- package/locales/bg-BG/error.json +10 -1
- package/locales/de-DE/common.json +5 -0
- package/locales/de-DE/error.json +10 -1
- package/locales/en-US/common.json +5 -0
- package/locales/es-ES/common.json +5 -0
- package/locales/es-ES/error.json +10 -1
- package/locales/fa-IR/common.json +5 -0
- package/locales/fa-IR/error.json +10 -1
- package/locales/fr-FR/common.json +5 -0
- package/locales/fr-FR/error.json +10 -1
- package/locales/it-IT/common.json +5 -0
- package/locales/it-IT/error.json +10 -1
- package/locales/ja-JP/common.json +5 -0
- package/locales/ja-JP/error.json +10 -1
- package/locales/ko-KR/common.json +5 -0
- package/locales/ko-KR/error.json +10 -1
- package/locales/nl-NL/common.json +5 -0
- package/locales/nl-NL/error.json +10 -1
- package/locales/pl-PL/common.json +5 -0
- package/locales/pl-PL/error.json +10 -1
- package/locales/pt-BR/common.json +5 -0
- package/locales/pt-BR/error.json +10 -1
- package/locales/ru-RU/common.json +5 -0
- package/locales/ru-RU/error.json +10 -1
- package/locales/tr-TR/common.json +5 -0
- package/locales/tr-TR/error.json +10 -1
- package/locales/vi-VN/common.json +5 -0
- package/locales/vi-VN/error.json +10 -1
- package/locales/zh-CN/common.json +5 -0
- package/locales/zh-TW/common.json +5 -0
- package/locales/zh-TW/error.json +10 -1
- package/package.json +2 -2
- package/packages/business/const/src/branding.ts +1 -0
- package/packages/business/const/src/llm.ts +2 -1
- package/packages/const/src/settings/llm.ts +2 -1
- package/packages/const/src/settings/systemAgent.ts +12 -7
- package/packages/database/src/models/agent.ts +18 -1
- package/packages/database/src/models/chatGroup.ts +18 -1
- package/packages/database/src/types/chatGroup.ts +1 -0
- package/packages/model-bank/package.json +1 -1
- package/packages/model-bank/src/aiModels/index.ts +2 -2
- package/packages/model-bank/src/aiModels/lobehub/chat/anthropic.ts +256 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/deepseek.ts +45 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/google.ts +267 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/index.ts +26 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/minimax.ts +75 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/moonshot.ts +28 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/openai.ts +345 -0
- package/packages/model-bank/src/aiModels/lobehub/chat/xai.ts +32 -0
- package/packages/model-bank/src/aiModels/lobehub/image.ts +240 -0
- package/packages/model-bank/src/aiModels/lobehub/index.ts +10 -0
- package/packages/model-bank/src/aiModels/lobehub/utils.ts +58 -0
- package/packages/model-bank/src/modelProviders/index.ts +10 -10
- package/packages/model-runtime/src/core/streams/qwen.test.ts +320 -0
- package/packages/model-runtime/src/core/streams/qwen.ts +19 -10
- package/packages/types/package.json +1 -1
- package/packages/types/src/agentGroup/index.ts +2 -0
- package/packages/types/src/discover/assistants.ts +9 -0
- package/packages/types/src/discover/fork.ts +163 -0
- package/packages/types/src/discover/groupAgents.ts +13 -4
- package/packages/types/src/discover/index.ts +9 -0
- package/src/app/[variants]/(auth)/_layout/index.tsx +2 -1
- package/src/app/[variants]/(auth)/auth-error/page.tsx +5 -5
- package/src/app/[variants]/(main)/agent/_layout/Sidebar/Topic/List/Item/index.tsx +1 -2
- package/src/app/[variants]/(main)/agent/profile/index.tsx +15 -2
- package/src/app/[variants]/(main)/community/(detail)/agent/features/Header.tsx +37 -0
- package/src/app/[variants]/(main)/community/(detail)/agent/features/Sidebar/ActionButton/ForkAndChat.tsx +133 -0
- package/src/app/[variants]/(main)/community/(detail)/agent/features/Sidebar/ActionButton/index.tsx +2 -2
- package/src/app/[variants]/(main)/community/(detail)/group_agent/features/Details/index.tsx +7 -10
- package/src/app/[variants]/(main)/community/(detail)/group_agent/features/Sidebar/ActionButton/ForkGroupAndChat.tsx +208 -0
- package/src/app/[variants]/(main)/community/(detail)/group_agent/features/Sidebar/ActionButton/index.tsx +2 -2
- package/src/app/[variants]/(main)/community/(detail)/user/features/DetailProvider.tsx +2 -0
- package/src/app/[variants]/(main)/community/(detail)/user/features/UserContent.tsx +7 -0
- package/src/app/[variants]/(main)/community/(detail)/user/features/UserForkedAgentGroups.tsx +63 -0
- package/src/app/[variants]/(main)/community/(detail)/user/features/UserForkedAgents.tsx +61 -0
- package/src/app/[variants]/(main)/community/(detail)/user/index.tsx +3 -1
- package/src/app/[variants]/(main)/group/_layout/Sidebar/Topic/List/Item/index.tsx +1 -2
- package/src/app/[variants]/(main)/settings/profile/index.tsx +92 -68
- package/src/app/[variants]/(mobile)/chat/features/Topic/index.tsx +2 -1
- package/src/features/CommandMenu/AskAgentCommands.tsx +105 -0
- package/src/features/CommandMenu/CommandMenuContext.tsx +57 -38
- package/src/features/CommandMenu/components/CommandInput.tsx +43 -9
- package/src/features/CommandMenu/index.tsx +89 -27
- package/src/features/CommandMenu/types.ts +6 -0
- package/src/features/CommandMenu/useCommandMenu.ts +62 -39
- package/src/features/PageEditor/PageEditor.tsx +20 -8
- package/src/locales/default/common.ts +5 -0
- package/src/locales/default/discover.ts +371 -0
- package/src/server/globalConfig/parseMemoryExtractionConfig.ts +7 -8
- package/src/server/routers/lambda/agent.ts +14 -0
- package/src/server/routers/lambda/agentGroup.ts +19 -3
- package/src/server/routers/lambda/market/agent.ts +234 -26
- package/src/server/routers/lambda/market/agentGroup.ts +204 -1
- package/src/server/services/discover/index.ts +52 -2
- package/src/server/services/memory/userMemory/__tests__/extract.runtime.test.ts +12 -2
- package/src/server/services/memory/userMemory/extract.ts +11 -2
- package/src/services/agent.ts +8 -0
- package/src/services/chatGroup/index.ts +8 -0
- package/src/services/marketApi.ts +78 -0
- package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +12 -12
- package/src/utils/styles.ts +10 -0
- package/packages/model-bank/src/aiModels/lobehub.ts +0 -1315
package/locales/vi-VN/common.json
CHANGED

@@ -99,6 +99,8 @@
   "cmdk.askAI": "Hỏi Tác nhân",
   "cmdk.askAIHeading": "Sử dụng các tính năng sau cho {{query}}",
   "cmdk.askAIHeadingEmpty": "Chọn một tính năng AI",
+  "cmdk.askAgentHeading": "Hỏi Đại Diện",
+  "cmdk.askAgentPlaceholder": "Hỏi {{agent}} điều gì đó...",
   "cmdk.askLobeAI": "Hỏi Lobe AI",
   "cmdk.community": "Cộng đồng",
   "cmdk.communitySupport": "Hỗ trợ cộng đồng",
@@ -114,8 +116,10 @@
   "cmdk.context.settings": "Cài đặt",
   "cmdk.discover": "Khám phá",
   "cmdk.keyboard.ESC": "ESC",
+  "cmdk.keyboard.Enter": "Enter",
   "cmdk.keyboard.Tab": "Tab",
   "cmdk.memory": "Bộ nhớ",
+  "cmdk.mentionAgent": "Nhắc đến Đại Diện",
   "cmdk.navigate": "Điều hướng",
   "cmdk.newAgent": "Tạo Tác nhân mới",
   "cmdk.newAgentTeam": "Tạo Nhóm mới",
@@ -151,6 +155,7 @@
   "cmdk.search.topic": "Chủ đề",
   "cmdk.search.topics": "Chủ đề",
   "cmdk.searchPlaceholder": "Nhập lệnh hoặc tìm kiếm...",
+  "cmdk.sendToAgent": "Gửi đến {{agent}}",
   "cmdk.settings": "Cài đặt",
   "cmdk.starOnGitHub": "Gắn sao cho chúng tôi trên GitHub",
   "cmdk.submitIssue": "Gửi vấn đề",
package/locales/vi-VN/error.json
CHANGED
@@ -12,6 +12,11 @@
   "import.importConfigFile.title": "Nhập thất bại",
   "import.incompatible.description": "Tệp này được xuất từ phiên bản cao hơn. Vui lòng nâng cấp lên phiên bản mới nhất rồi thử nhập lại.",
   "import.incompatible.title": "Ứng dụng hiện tại không hỗ trợ nhập tệp này",
+  "inviteCode.currentEmail": "Tài khoản hiện tại: {{email}}",
+  "inviteCode.desc": "Bạn cần có mã mời để truy cập LobeHub. Vui lòng nhập mã mời hợp lệ để tiếp tục.",
+  "inviteCode.friends": "Bạn bè",
+  "inviteCode.getCodeHint": "Lấy mã mời từ:",
+  "inviteCode.title": "Yêu Cầu Mã Mời",
   "loginRequired.desc": "Bạn sẽ được chuyển hướng đến trang đăng nhập trong giây lát",
   "loginRequired.title": "Vui lòng đăng nhập để sử dụng tính năng này",
   "notFound.backHome": "Quay về Trang chủ",
@@ -144,5 +149,9 @@
   "upload.networkError": "Vui lòng kiểm tra kết nối mạng và đảm bảo cấu hình CORS của dịch vụ lưu trữ tệp là chính xác.",
   "upload.title": "Tải tệp thất bại. Vui lòng kiểm tra kết nối mạng hoặc thử lại sau",
   "upload.unknownError": "Lý do lỗi: {{reason}}",
-  "upload.uploadFailed": "Tải tệp thất bại."
+  "upload.uploadFailed": "Tải tệp thất bại.",
+  "waitlist.currentEmail": "Tài khoản hiện tại: {{email}}",
+  "waitlist.desc": "Tài khoản của bạn chưa có trong danh sách cho phép. Vui lòng liên hệ quản trị viên để yêu cầu quyền truy cập.",
+  "waitlist.switchAccount": "Chuyển Tài Khoản",
+  "waitlist.title": "Hạn Chế Truy Cập"
 }
package/locales/zh-CN/common.json
CHANGED

@@ -99,6 +99,8 @@
   "cmdk.askAI": "问助理",
   "cmdk.askAIHeading": "使用以下功能处理 {{query}}",
   "cmdk.askAIHeadingEmpty": "选择一个 AI 功能",
+  "cmdk.askAgentHeading": "选择助理",
+  "cmdk.askAgentPlaceholder": "向 {{agent}} 提问…",
   "cmdk.askLobeAI": "问 Lobe AI",
   "cmdk.community": "社区",
   "cmdk.communitySupport": "社区支持",
@@ -114,8 +116,10 @@
   "cmdk.context.settings": "设置",
   "cmdk.discover": "发现",
   "cmdk.keyboard.ESC": "ESC",
+  "cmdk.keyboard.Enter": "Enter",
   "cmdk.keyboard.Tab": "Tab",
   "cmdk.memory": "记忆",
+  "cmdk.mentionAgent": "提及助理",
   "cmdk.navigate": "导航",
   "cmdk.newAgent": "新建助理",
   "cmdk.newAgentTeam": "新建助理团队",
@@ -151,6 +155,7 @@
   "cmdk.search.topic": "话题",
   "cmdk.search.topics": "话题",
   "cmdk.searchPlaceholder": "输入命令或搜索…",
+  "cmdk.sendToAgent": "发送给 {{agent}}",
   "cmdk.settings": "设置",
   "cmdk.starOnGitHub": "在 GitHub 上给我们 Star",
   "cmdk.submitIssue": "提交问题",
package/locales/zh-TW/common.json
CHANGED

@@ -99,6 +99,8 @@
   "cmdk.askAI": "詢問 AI",
   "cmdk.askAIHeading": "使用以下功能處理 {{query}}",
   "cmdk.askAIHeadingEmpty": "選擇一個 AI 功能",
+  "cmdk.askAgentHeading": "詢問代理",
+  "cmdk.askAgentPlaceholder": "向 {{agent}} 詢問問題……",
   "cmdk.askLobeAI": "詢問 Lobe AI",
   "cmdk.community": "社群",
   "cmdk.communitySupport": "社群支援",
@@ -114,8 +116,10 @@
   "cmdk.context.settings": "設定",
   "cmdk.discover": "探索",
   "cmdk.keyboard.ESC": "ESC",
+  "cmdk.keyboard.Enter": "輸入",
   "cmdk.keyboard.Tab": "Tab",
   "cmdk.memory": "記憶",
+  "cmdk.mentionAgent": "提及代理",
   "cmdk.navigate": "導覽",
   "cmdk.newAgent": "新增助手",
   "cmdk.newAgentTeam": "新增助理團隊",
@@ -151,6 +155,7 @@
   "cmdk.search.topic": "主題",
   "cmdk.search.topics": "主題",
   "cmdk.searchPlaceholder": "輸入指令或搜尋...",
+  "cmdk.sendToAgent": "發送給 {{agent}}",
   "cmdk.settings": "設定",
   "cmdk.starOnGitHub": "在 GitHub 上給我們星標",
   "cmdk.submitIssue": "提交問題",
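The new command-menu strings above all carry `{{agent}}` / `{{query}}` interpolation slots. As a minimal sketch of how such keys are typically consumed, assuming the app resolves them through i18next (the call site and namespace handling below are illustrative, not taken from this diff):

```ts
import i18next from 'i18next';

// "cmdk.sendToAgent" is "发送给 {{agent}}" in zh-CN and "Gửi đến {{agent}}" in vi-VN;
// interpolation fills the {{agent}} slot at render time.
const label = i18next.t('cmdk.sendToAgent', { agent: 'Claude Sonnet 4.5' });
// zh-CN -> "发送给 Claude Sonnet 4.5"
```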
package/locales/zh-TW/error.json
CHANGED
@@ -12,6 +12,11 @@
   "import.importConfigFile.title": "匯入失敗",
   "import.incompatible.description": "該檔案由更高版本匯出,請嘗試升級到最新版本後再重新匯入",
   "import.incompatible.title": "當前應用不支援匯入該檔案",
+  "inviteCode.currentEmail": "當前帳號:{{email}}",
+  "inviteCode.desc": "需要邀請碼才能存取 LobeHub。請輸入有效的邀請碼以繼續。",
+  "inviteCode.friends": "朋友",
+  "inviteCode.getCodeHint": "請向以下來源取得邀請碼:",
+  "inviteCode.title": "需要邀請碼",
   "loginRequired.desc": "即將自動跳轉到登入頁面",
   "loginRequired.title": "請登入後使用該功能",
   "notFound.backHome": "返回首頁",
@@ -144,5 +149,9 @@
   "upload.networkError": "請確認你的網路是否正常,並檢查檔案儲存服務的跨域配置是否正確",
   "upload.title": "檔案上傳失敗,請檢查網路連線或稍後再試",
   "upload.unknownError": "錯誤原因: {{reason}}",
-  "upload.uploadFailed": "檔案上傳失敗"
+  "upload.uploadFailed": "檔案上傳失敗",
+  "waitlist.currentEmail": "當前帳號:{{email}}",
+  "waitlist.desc": "您的帳號尚未列入白名單。請聯絡管理員以申請存取權限。",
+  "waitlist.switchAccount": "切換帳號",
+  "waitlist.title": "存取受限"
 }
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.321",
+  "version": "2.0.0-next.323",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -205,7 +205,7 @@
   "@lobehub/desktop-ipc-typings": "workspace:*",
   "@lobehub/editor": "^3.11.0",
   "@lobehub/icons": "^4.0.2",
-  "@lobehub/market-sdk": "0.29.
+  "@lobehub/market-sdk": "0.29.1",
   "@lobehub/tts": "^4.0.2",
   "@lobehub/ui": "^4.22.0",
   "@modelcontextprotocol/sdk": "^1.25.1",
packages/const/src/settings/systemAgent.ts
CHANGED

@@ -1,25 +1,30 @@
-import { DEFAULT_PROVIDER } from '@lobechat/business-const';
+import { DEFAULT_MINI_PROVIDER, DEFAULT_PROVIDER } from '@lobechat/business-const';
 import { QueryRewriteSystemAgent, SystemAgentItem, UserSystemAgentConfig } from '@lobechat/types';

-import { DEFAULT_MODEL } from './llm';
+import { DEFAULT_MINI_MODEL, DEFAULT_MODEL } from './llm';

 export const DEFAULT_SYSTEM_AGENT_ITEM: SystemAgentItem = {
   model: DEFAULT_MODEL,
   provider: DEFAULT_PROVIDER,
 };

+export const DEFAULT_MINI_SYSTEM_AGENT_ITEM: SystemAgentItem = {
+  model: DEFAULT_MINI_MODEL,
+  provider: DEFAULT_MINI_PROVIDER,
+};
+
 export const DEFAULT_QUERY_REWRITE_SYSTEM_AGENT_ITEM: QueryRewriteSystemAgent = {
   enabled: true,
-  model:
-  provider:
+  model: DEFAULT_MINI_SYSTEM_AGENT_ITEM.model,
+  provider: DEFAULT_MINI_SYSTEM_AGENT_ITEM.provider,
 };

 export const DEFAULT_SYSTEM_AGENT_CONFIG: UserSystemAgentConfig = {
   agentMeta: DEFAULT_SYSTEM_AGENT_ITEM,
-  generationTopic:
+  generationTopic: DEFAULT_MINI_SYSTEM_AGENT_ITEM,
   historyCompress: DEFAULT_SYSTEM_AGENT_ITEM,
   queryRewrite: DEFAULT_QUERY_REWRITE_SYSTEM_AGENT_ITEM,
   thread: DEFAULT_SYSTEM_AGENT_ITEM,
-  topic:
-  translation:
+  topic: DEFAULT_MINI_SYSTEM_AGENT_ITEM,
+  translation: DEFAULT_MINI_SYSTEM_AGENT_ITEM,
 };
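Net effect: the lightweight system agents (topic naming, translation, topic generation, query rewrite) now default to a cheaper "mini" item while heavier tasks keep the full default. A minimal sketch of how the resulting defaults read; the concrete values behind DEFAULT_MINI_MODEL and DEFAULT_MINI_PROVIDER come from the llm.ts and business-const changes not shown above, and the import specifier is an assumption:

```ts
// Hypothetical consumer of the new defaults (import path assumed).
import { DEFAULT_SYSTEM_AGENT_CONFIG } from './systemAgent';

// Heavy flows keep the full-size default item...
const thread = DEFAULT_SYSTEM_AGENT_CONFIG.thread; // { model: DEFAULT_MODEL, provider: DEFAULT_PROVIDER }

// ...while utility flows now resolve to the mini item.
const topic = DEFAULT_SYSTEM_AGENT_CONFIG.topic; // { model: DEFAULT_MINI_MODEL, provider: DEFAULT_MINI_PROVIDER }
const rewrite = DEFAULT_SYSTEM_AGENT_CONFIG.queryRewrite; // { enabled: true, model: DEFAULT_MINI_MODEL, ... }
```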
packages/database/src/models/agent.ts
CHANGED

@@ -1,6 +1,6 @@
 import { getAgentPersistConfig } from '@lobechat/builtin-agents';
 import { INBOX_SESSION_ID } from '@lobechat/const';
-import { and, desc, eq, ilike, inArray, isNull, or } from 'drizzle-orm';
+import { and, desc, eq, ilike, inArray, isNull, or, sql } from 'drizzle-orm';
 import type { PartialDeep } from 'type-fest';

 import { merge } from '@/utils/merge';
@@ -379,6 +379,23 @@ export class AgentModel {
     return result?.id ?? null;
   };

+  /**
+   * Get an agent by the forkedFromIdentifier stored in params
+   * @param forkedFromIdentifier - The source agent's market identifier
+   * @returns agent id if exists, null otherwise
+   */
+  getAgentByForkedFromIdentifier = async (forkedFromIdentifier: string): Promise<string | null> => {
+    const result = await this.db.query.agents.findFirst({
+      columns: { id: true },
+      orderBy: (agents, { desc }) => [desc(agents.updatedAt)],
+      where: and(
+        eq(agents.userId, this.userId),
+        sql`${agents.params}->>'forkedFromIdentifier' = ${forkedFromIdentifier}`,
+      ),
+    });
+    return result?.id ?? null;
+  };
+
   updateConfig = async (agentId: string, data: PartialDeep<AgentItem> | undefined | null) => {
     if (!data || Object.keys(data).length === 0) return;

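This lookup lets the fork flow deduplicate: before creating an agent from a market entry, the server can check whether the current user already forked that identifier and route them back to the existing agent. A hedged sketch of such a caller; the constructor shape and the create step are illustrative placeholders, not code from this diff:

```ts
// Hypothetical caller inside a fork-and-chat handler.
const agentModel = new AgentModel(db, userId); // constructor shape assumed

const existingId = await agentModel.getAgentByForkedFromIdentifier(marketIdentifier);

// Reuse the previously forked agent when it exists; otherwise create one and
// stamp params.forkedFromIdentifier so the next lookup finds it.
const agentId = existingId ?? (await createForkedAgent(marketIdentifier)); // createForkedAgent is a placeholder
```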
packages/database/src/models/chatGroup.ts
CHANGED

@@ -1,4 +1,4 @@
-import { and, desc, eq, inArray } from 'drizzle-orm';
+import { and, desc, eq, inArray, sql } from 'drizzle-orm';

 import {
   ChatGroupAgentItem,
@@ -35,6 +35,23 @@ export class ChatGroupModel {
     });
   }

+  /**
+   * Get a chat group by the forkedFromIdentifier stored in config
+   * @param forkedFromIdentifier - The source group's market identifier
+   * @returns group id if exists, null otherwise
+   */
+  async getGroupByForkedFromIdentifier(forkedFromIdentifier: string): Promise<string | null> {
+    const result = await this.db.query.chatGroups.findFirst({
+      columns: { id: true },
+      orderBy: [desc(chatGroups.updatedAt)],
+      where: and(
+        eq(chatGroups.userId, this.userId),
+        sql`${chatGroups.config}->>'forkedFromIdentifier' = ${forkedFromIdentifier}`,
+      ),
+    });
+    return result?.id ?? null;
+  }
+
   async queryWithMemberDetails(): Promise<any[]> {
     const groups = await this.query();
     if (groups.length === 0) return [];
packages/model-bank/package.json
CHANGED

@@ -29,7 +29,7 @@
     "./github": "./src/aiModels/github.ts",
     "./google": "./src/aiModels/google.ts",
     "./groq": "./src/aiModels/groq.ts",
-    "./lobehub": "./src/aiModels/lobehub.ts",
+    "./lobehub": "./src/aiModels/lobehub/index.ts",
    "./higress": "./src/aiModels/higress.ts",
     "./huggingface": "./src/aiModels/huggingface.ts",
     "./hunyuan": "./src/aiModels/hunyuan.ts",
packages/model-bank/src/aiModels/index.ts
CHANGED

@@ -31,7 +31,7 @@ import { default as infiniai } from './infiniai';
 import { default as internlm } from './internlm';
 import { default as jina } from './jina';
 import { default as lmstudio } from './lmstudio';
-import { default as lobehub } from './lobehub';
+import { default as lobehub } from './lobehub/index';
 import { default as minimax } from './minimax';
 import { default as mistral } from './mistral';
 import { default as modelscope } from './modelscope';
@@ -194,7 +194,7 @@ export { default as infiniai } from './infiniai';
 export { default as internlm } from './internlm';
 export { default as jina } from './jina';
 export { default as lmstudio } from './lmstudio';
-export { default as lobehub } from './lobehub';
+export { default as lobehub } from './lobehub/index';
 export { default as minimax } from './minimax';
 export { default as mistral } from './mistral';
 export { default as modelscope } from './modelscope';
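The monolithic lobehub.ts (1315 lines removed) is replaced by a lobehub/ directory with per-vendor chat files, an image list, shared utils, and a small aggregator, so the `./lobehub` export path and the imports above now point at `lobehub/index`. The aggregator itself is not included in this diff; a plausible shape, stated purely as an assumption based on the new file names, would be:

```ts
// Hypothetical sketch of packages/model-bank/src/aiModels/lobehub/index.ts (not shown in this diff).
import { AIChatModelCard, AIImageModelCard } from '../../types/aiModel';

import { lobehubChatModels } from './chat'; // chat/index.ts presumably merges anthropic, deepseek, google, ...
import { lobehubImageModels } from './image';

const allModels: (AIChatModelCard | AIImageModelCard)[] = [...lobehubChatModels, ...lobehubImageModels];

export default allModels;
```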
packages/model-bank/src/aiModels/lobehub/chat/anthropic.ts
ADDED

@@ -0,0 +1,256 @@
+import { AIChatModelCard } from '../../../types/aiModel';
+
+export const anthropicChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description: "Claude Sonnet 4.5 is Anthropic's most intelligent model to date.",
+    displayName: 'Claude Sonnet 4.5',
+    enabled: true,
+    id: 'claude-sonnet-4-5-20250929',
+    maxOutput: 8192,
+    pricing: {
+      units: [
+        { name: 'textInput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheRead', rate: 0.3, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheWrite', rate: 3.75, strategy: 'fixed', unit: 'millionTokens' },
+      ],
+    },
+    releasedAt: '2025-09-30',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude Sonnet 4 is Anthropic's most intelligent model to date, offering near-instant responses or extended step-by-step thinking with fine-grained control for API users.",
+    displayName: 'Claude Sonnet 4',
+    id: 'claude-sonnet-4-20250514',
+    maxOutput: 8192,
+    pricing: {
+      units: [
+        { name: 'textInput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheRead', rate: 0.3, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 6, '5m': 3.75 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-05-23',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude Sonnet 3.7 is Anthropic's most intelligent model and the first hybrid reasoning model on the market, supporting near-instant responses or extended thinking with fine-grained control.",
+    displayName: 'Claude Sonnet 3.7',
+    id: 'claude-3-7-sonnet-20250219',
+    maxOutput: 8192,
+    pricing: {
+      units: [
+        { name: 'textInput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheRead', rate: 0.3, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 6, '5m': 3.75 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      structuredOutput: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude Opus 4.5 is Anthropic's flagship model, combining excellent intelligence and scalable performance for the highest-quality reasoning tasks.",
+    displayName: 'Claude Opus 4.5',
+    enabled: true,
+    id: 'claude-opus-4-5-20251101',
+    maxOutput: 64_000,
+    pricing: {
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 25, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 10, '5m': 6.25 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-11-24',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude Opus 4.1 is Anthropic's latest and most capable model for highly complex tasks, excelling in performance, intelligence, fluency, and understanding.",
+    displayName: 'Claude Opus 4.1',
+    id: 'claude-opus-4-1-20250805',
+    maxOutput: 32_000,
+    pricing: {
+      units: [
+        { name: 'textInput_cacheRead', rate: 1.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 75, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 30, '5m': 18.75 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-08-05',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude Opus 4 is Anthropic's most powerful model for highly complex tasks, excelling in performance, intelligence, fluency, and understanding.",
+    displayName: 'Claude Opus 4',
+    id: 'claude-opus-4-20250514',
+    maxOutput: 32_000,
+    pricing: {
+      units: [
+        { name: 'textInput_cacheRead', rate: 1.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 75, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 30, '5m': 18.75 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-05-23',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      structuredOutput: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude Haiku 4.5 is Anthropic's fastest and most intelligent Haiku model, with lightning speed and extended thinking.",
+    displayName: 'Claude Haiku 4.5',
+    enabled: true,
+    id: 'claude-haiku-4-5-20251001',
+    maxOutput: 64_000,
+    pricing: {
+      units: [
+        { name: 'textInput', rate: 1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheRead', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 2, '5m': 1.25 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-10-16',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      "Claude 3.5 Haiku is Anthropic's fastest next-gen model, improving across skills and surpassing the previous flagship Claude 3 Opus on many benchmarks.",
+    displayName: 'Claude 3.5 Haiku',
+    id: 'claude-3-5-haiku-20241022',
+    maxOutput: 8192,
+    pricing: {
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.08, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 0.8, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 4, strategy: 'fixed', unit: 'millionTokens' },
+        {
+          lookup: { prices: { '1h': 1.6, '5m': 1 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2024-11-05',
+    settings: {
+      extendParams: ['disableContextCaching'],
+    },
+    type: 'chat',
+  },
+];
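The fixed and lookup pricing strategies above differ in how a rate is resolved: a fixed rate is flat per million tokens, while a lookup rate depends on a pricing parameter (here the cache-write ttl). A minimal sketch of how a billing helper might resolve a unit rate from such a card; the helper is illustrative only and not part of model-bank's public API:

```ts
// Illustrative only: a tiny resolver for the two pricing strategies used above.
type PricingUnit =
  | { name: string; rate: number; strategy: 'fixed'; unit: 'millionTokens' }
  | {
      lookup: { prices: Record<string, number>; pricingParams: string[] };
      name: string;
      strategy: 'lookup';
      unit: 'millionTokens';
    };

const resolveRate = (unit: PricingUnit, params: Record<string, string> = {}): number =>
  unit.strategy === 'fixed'
    ? unit.rate
    : unit.lookup.prices[params[unit.lookup.pricingParams[0]]];

// Claude Sonnet 4 cache write: 3.75 per million tokens at a 5-minute ttl, 6 at a 1-hour ttl.
const cacheWrite: PricingUnit = {
  lookup: { prices: { '1h': 6, '5m': 3.75 }, pricingParams: ['ttl'] },
  name: 'textInput_cacheWrite',
  strategy: 'lookup',
  unit: 'millionTokens',
};

resolveRate(cacheWrite, { ttl: '5m' }); // -> 3.75
```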
packages/model-bank/src/aiModels/lobehub/chat/deepseek.ts
ADDED

@@ -0,0 +1,45 @@
+import { AIChatModelCard } from '../../../types/aiModel';
+
+export const deepseekChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek V3.2 balances reasoning and output length for daily QA and agent tasks. Public benchmarks reach GPT-5 levels, and it is the first to integrate thinking into tool use, leading open-source agent evaluations.',
+    displayName: 'DeepSeek V3.2',
+    enabled: true,
+    id: 'deepseek-chat',
+    pricing: {
+      units: [
+        { name: 'textInput', rate: 0.56, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheRead', rate: 0.07, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 1.68, strategy: 'fixed', unit: 'millionTokens' },
+      ],
+    },
+    releasedAt: '2025-12-01',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek V3.2 Thinking is a deep reasoning model that generates chain-of-thought before outputs for higher accuracy, with top competition results and reasoning comparable to Gemini-3.0-Pro.',
+    displayName: 'DeepSeek V3.2 Thinking',
+    enabled: true,
+    id: 'deepseek-reasoner',
+    pricing: {
+      units: [
+        { name: 'textInput', rate: 0.55, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 2.19, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput_cacheRead', rate: 0.14, strategy: 'fixed', unit: 'millionTokens' },
+      ],
+    },
+    releasedAt: '2025-12-01',
+    type: 'chat',
+  },
+];
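As a worked example of how these fixed rates translate into request cost (assuming the rates are USD per million tokens, which this diff does not state explicitly): a deepseek-chat call with 10,000 uncached input tokens and 2,000 output tokens costs 10,000/1,000,000 × 0.56 + 2,000/1,000,000 × 1.68 = 0.0056 + 0.00336 ≈ $0.009.

```ts
// Rough per-request cost for deepseek-chat at the rates above (currency assumed to be USD).
const inputTokens = 10_000;
const outputTokens = 2_000;
const cost = (inputTokens / 1_000_000) * 0.56 + (outputTokens / 1_000_000) * 1.68;
// -> 0.00896, i.e. just under one cent
```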