@lobehub/lobehub 2.0.0-next.234 → 2.0.0-next.236
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.devcontainer/devcontainer.json +4 -2
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +14 -0
- package/locales/ar/components.json +1 -0
- package/locales/ar/file.json +4 -0
- package/locales/ar/models.json +29 -0
- package/locales/ar/setting.json +7 -0
- package/locales/bg-BG/components.json +1 -0
- package/locales/bg-BG/file.json +4 -0
- package/locales/bg-BG/models.json +1 -0
- package/locales/bg-BG/setting.json +7 -0
- package/locales/de-DE/components.json +1 -0
- package/locales/de-DE/file.json +4 -0
- package/locales/de-DE/models.json +29 -0
- package/locales/de-DE/setting.json +7 -0
- package/locales/en-US/common.json +0 -1
- package/locales/en-US/components.json +1 -0
- package/locales/en-US/file.json +4 -0
- package/locales/en-US/models.json +1 -0
- package/locales/es-ES/components.json +1 -0
- package/locales/es-ES/file.json +4 -0
- package/locales/es-ES/models.json +43 -0
- package/locales/es-ES/setting.json +7 -0
- package/locales/fa-IR/components.json +1 -0
- package/locales/fa-IR/file.json +4 -0
- package/locales/fa-IR/models.json +54 -0
- package/locales/fa-IR/setting.json +7 -0
- package/locales/fr-FR/components.json +1 -0
- package/locales/fr-FR/file.json +4 -0
- package/locales/fr-FR/models.json +31 -0
- package/locales/fr-FR/setting.json +7 -0
- package/locales/it-IT/components.json +1 -0
- package/locales/it-IT/file.json +4 -0
- package/locales/it-IT/models.json +43 -0
- package/locales/it-IT/setting.json +7 -0
- package/locales/ja-JP/components.json +1 -0
- package/locales/ja-JP/file.json +4 -0
- package/locales/ja-JP/models.json +28 -0
- package/locales/ja-JP/setting.json +7 -0
- package/locales/ko-KR/components.json +1 -0
- package/locales/ko-KR/file.json +4 -0
- package/locales/ko-KR/models.json +37 -0
- package/locales/ko-KR/setting.json +7 -0
- package/locales/nl-NL/components.json +1 -0
- package/locales/nl-NL/file.json +4 -0
- package/locales/nl-NL/models.json +13 -0
- package/locales/nl-NL/setting.json +7 -0
- package/locales/pl-PL/components.json +1 -0
- package/locales/pl-PL/file.json +4 -0
- package/locales/pl-PL/models.json +13 -0
- package/locales/pl-PL/setting.json +7 -0
- package/locales/pt-BR/components.json +1 -0
- package/locales/pt-BR/file.json +4 -0
- package/locales/pt-BR/models.json +29 -0
- package/locales/pt-BR/setting.json +7 -0
- package/locales/ru-RU/components.json +1 -0
- package/locales/ru-RU/file.json +4 -0
- package/locales/ru-RU/models.json +1 -0
- package/locales/ru-RU/setting.json +7 -0
- package/locales/tr-TR/components.json +1 -0
- package/locales/tr-TR/file.json +4 -0
- package/locales/tr-TR/models.json +29 -0
- package/locales/tr-TR/setting.json +7 -0
- package/locales/vi-VN/components.json +1 -0
- package/locales/vi-VN/file.json +4 -0
- package/locales/vi-VN/models.json +1 -0
- package/locales/vi-VN/setting.json +7 -0
- package/locales/zh-CN/models.json +46 -0
- package/locales/zh-TW/components.json +1 -0
- package/locales/zh-TW/file.json +4 -0
- package/locales/zh-TW/models.json +35 -0
- package/locales/zh-TW/setting.json +7 -0
- package/package.json +1 -1
- package/packages/model-bank/src/aiModels/anthropic.ts +0 -30
- package/packages/model-bank/src/aiModels/volcengine.ts +2 -1
- package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +1 -7
- package/src/server/routers/lambda/_helpers/resolveContext.ts +8 -8
- package/src/server/routers/lambda/agent.ts +1 -1
- package/src/server/routers/lambda/aiModel.ts +1 -1
- package/src/server/routers/lambda/comfyui.ts +1 -1
- package/src/server/routers/lambda/exporter.ts +1 -1
- package/src/server/routers/lambda/image.ts +13 -13
- package/src/server/routers/lambda/klavis.ts +10 -10
- package/src/server/routers/lambda/market/index.ts +6 -6
- package/src/server/routers/lambda/message.ts +2 -2
- package/src/server/routers/lambda/plugin.ts +1 -1
- package/src/server/routers/lambda/ragEval.ts +2 -2
- package/src/server/routers/lambda/topic.ts +3 -3
- package/src/server/routers/lambda/user.ts +10 -10
- package/src/server/routers/lambda/userMemories.ts +6 -6
@@ -491,6 +491,41 @@
   "flux-pro.description": "頂級商業圖像生成模型,擁有無與倫比的圖像品質與多樣化輸出能力。",
   "flux-schnell.description": "FLUX.1 [schnell] 是最先進的開源少步驟模型,超越同類競品,甚至優於如 Midjourney v6.0 與 DALL-E 3(HD)等強大非蒸餾模型。其精細調校保留預訓練多樣性,顯著提升視覺品質、指令遵循、尺寸與比例變化、字體處理與輸出多樣性。",
   "flux.1-schnell.description": "FLUX.1-schnell 是一款高效能圖像生成模型,支援快速多風格輸出。",
+  "gemini-1.0-pro-001.description": "Gemini 1.0 Pro 001(調校版)在處理複雜任務時提供穩定且可調整的效能。",
+  "gemini-1.0-pro-002.description": "Gemini 1.0 Pro 002(調校版)在處理複雜任務時提供強大的多模態支援。",
+  "gemini-1.0-pro-latest.description": "Gemini 1.0 Pro 是 Google 所推出的高效能 AI 模型,專為大規模任務擴展而設計。",
+  "gemini-1.5-flash-001.description": "Gemini 1.5 Flash 001 是一款高效率的多模態模型,適用於廣泛應用的擴展場景。",
+  "gemini-1.5-flash-002.description": "Gemini 1.5 Flash 002 是一款高效率的多模態模型,專為大規模部署而打造。",
+  "gemini-1.5-flash-8b-exp-0924.description": "Gemini 1.5 Flash 8B 0924 是最新的實驗性模型,在文字與多模態應用上有顯著提升。",
+  "gemini-1.5-flash-8b-latest.description": "Gemini 1.5 Flash 8B 是一款高效率的多模態模型,專為大規模部署而設計。",
+  "gemini-1.5-flash-8b.description": "Gemini 1.5 Flash 8B 是一款高效率的多模態模型,適用於廣泛應用的擴展場景。",
+  "gemini-1.5-flash-exp-0827.description": "Gemini 1.5 Flash 0827 提供針對複雜任務的最佳化多模態處理能力。",
+  "gemini-1.5-flash-latest.description": "Gemini 1.5 Flash 是 Google 最新的多模態 AI 模型,具備快速處理能力,支援文字、圖像與影片輸入,能高效擴展至各類任務。",
+  "gemini-1.5-pro-001.description": "Gemini 1.5 Pro 001 是一款可擴展的多模態 AI 解決方案,適用於處理複雜任務。",
+  "gemini-1.5-pro-002.description": "Gemini 1.5 Pro 002 是最新的生產就緒模型,輸出品質更高,特別適用於數學、長上下文與視覺任務。",
+  "gemini-1.5-pro-exp-0801.description": "Gemini 1.5 Pro 0801 提供強大的多模態處理能力,並具備更高的應用開發彈性。",
+  "gemini-1.5-pro-exp-0827.description": "Gemini 1.5 Pro 0827 採用最新優化技術,提升多模態處理效率。",
+  "gemini-1.5-pro-latest.description": "Gemini 1.5 Pro 支援最多 200 萬個 token,是一款中型多模態模型,適合處理複雜任務。",
+  "gemini-2.0-flash-001.description": "Gemini 2.0 Flash 提供次世代功能,包括極速處理、原生工具使用、多模態生成,以及 100 萬 token 的上下文視窗。",
+  "gemini-2.0-flash-exp-image-generation.description": "Gemini 2.0 Flash 實驗性模型,支援圖像生成功能。",
+  "gemini-2.0-flash-exp.description": "Gemini 2.0 Flash 的一個變體,針對成本效益與低延遲進行最佳化。",
+  "gemini-2.0-flash-lite-001.description": "Gemini 2.0 Flash 的一個變體,針對成本效益與低延遲進行最佳化。",
+  "gemini-2.0-flash-lite.description": "Gemini 2.0 Flash 的一個變體,針對成本效益與低延遲進行最佳化。",
+  "gemini-2.0-flash.description": "Gemini 2.0 Flash 提供次世代功能,包括極速處理、原生工具使用、多模態生成,以及 100 萬 token 的上下文視窗。",
+  "gemini-2.5-flash-image-preview.description": "Nano Banana 是 Google 最新、最快且最有效率的原生多模態模型,支援對話式圖像生成與編輯。",
+  "gemini-2.5-flash-image-preview:image.description": "Nano Banana 是 Google 最新、最快且最有效率的原生多模態模型,支援對話式圖像生成與編輯。",
+  "gemini-2.5-flash-image.description": "Nano Banana 是 Google 最新、最快且最有效率的原生多模態模型,支援對話式圖像生成與編輯。",
+  "gemini-2.5-flash-image:image.description": "Nano Banana 是 Google 最新、最快且最有效率的原生多模態模型,支援對話式圖像生成與編輯。",
+  "gemini-2.5-flash-lite-preview-06-17.description": "Gemini 2.5 Flash-Lite Preview 是 Google 體積最小、性價比最高的模型,專為大規模應用而設計。",
+  "gemini-2.5-flash-lite-preview-09-2025.description": "Gemini 2.5 Flash-Lite 的預覽版本(2025 年 9 月 25 日)。",
+  "gemini-2.5-flash-lite.description": "Gemini 2.5 Flash-Lite 是 Google 體積最小、性價比最高的模型,專為大規模應用而設計。",
+  "gemini-2.5-flash-preview-04-17.description": "Gemini 2.5 Flash Preview 是 Google 功能最完整、性價比最高的模型。",
+  "gemini-2.5-flash-preview-09-2025.description": "Gemini 2.5 Flash 的預覽版本(2025 年 9 月 25 日)。",
+  "gemini-2.5-flash.description": "Gemini 2.5 Flash 是 Google 功能最完整、性價比最高的模型。",
+  "gemini-2.5-pro-preview-03-25.description": "Gemini 2.5 Pro Preview 是 Google 最先進的推理模型,能處理程式碼、數學與 STEM 問題,並分析大型資料集、程式碼庫與長篇文件。",
+  "gemini-2.5-pro-preview-05-06.description": "Gemini 2.5 Pro Preview 是 Google 最先進的推理模型,能處理程式碼、數學與 STEM 問題,並分析大型資料集、程式碼庫與長篇文件。",
+  "gemini-2.5-pro-preview-06-05.description": "Gemini 2.5 Pro Preview 是 Google 最先進的推理模型,能處理程式碼、數學與 STEM 問題,並分析大型資料集、程式碼庫與長篇文件。",
+  "gemini-2.5-pro.description": "Gemini 2.5 Pro 是 Google 的旗艦推理模型,支援長上下文,適用於處理複雜任務。",
   "gemini-flash-latest.description": "Gemini Flash 最新版本",
   "gemini-flash-lite-latest.description": "Gemini Flash-Lite 最新版本",
   "gemini-pro-latest.description": "Gemini Pro 最新版本",
@@ -127,6 +127,10 @@
   "llm.proxyUrl.title": "API 代理位址",
   "llm.waitingForMore": "更多模型正在 <1>計劃接入</1> 中,敬請期待",
   "llm.waitingForMoreLinkAriaLabel": "開啟模型服務商接入需求表單",
+  "marketPublish.forkConfirm.by": "由 {{author}}",
+  "marketPublish.forkConfirm.confirm": "確認發佈",
+  "marketPublish.forkConfirm.description": "您即將發佈一個基於社群中現有代理所衍生的版本。您的新代理將作為獨立項目發佈至市集。",
+  "marketPublish.forkConfirm.title": "發佈衍生代理",
   "marketPublish.modal.changelog.extra": "描述此版本的主要變更與改進",
   "marketPublish.modal.changelog.label": "變更日誌",
   "marketPublish.modal.changelog.maxLengthError": "變更日誌不能超過 500 個字元",
@@ -524,6 +528,9 @@
   "tools.klavis.servers": "個伺服器",
   "tools.klavis.tools": "個工具",
   "tools.klavis.verifyAuth": "我已完成驗證",
+  "tools.lobehubSkill.authorize": "授權",
+  "tools.lobehubSkill.connect": "連接",
+  "tools.lobehubSkill.error": "錯誤",
   "tools.notInstalled": "尚未安裝",
   "tools.notInstalledWarning": "目前外掛尚未安裝,可能會影響助手使用",
   "tools.plugins.enabled": "已啟用 {{num}}",
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.234",
+  "version": "2.0.0-next.236",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -292,36 +292,6 @@ const anthropicChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    abilities: {
-      functionCall: true,
-      vision: true,
-    },
-    contextWindowTokens: 200_000,
-    description:
-      'Claude 3 Opus is Anthropic’s most powerful model for highly complex tasks, excelling in performance, intelligence, fluency, and comprehension.',
-    displayName: 'Claude 3 Opus',
-    id: 'claude-3-opus-20240229',
-    maxOutput: 4096,
-    pricing: {
-      units: [
-        { name: 'textInput_cacheRead', rate: 1.5, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textOutput', rate: 75, strategy: 'fixed', unit: 'millionTokens' },
-        {
-          lookup: { prices: { '1h': 30, '5m': 18.75 }, pricingParams: ['ttl'] },
-          name: 'textInput_cacheWrite',
-          strategy: 'lookup',
-          unit: 'millionTokens',
-        },
-      ],
-    },
-    releasedAt: '2024-02-29',
-    settings: {
-      extendParams: ['disableContextCaching'],
-    },
-    type: 'chat',
-  },
 ];

 export const allModels = [...anthropicChatModels];
@@ -10,12 +10,13 @@ const doubaoChatModels: AIChatModelCard[] = [
       vision: true,
     },
     config: {
-      deploymentName: 'doubao-seed-1-8-
+      deploymentName: 'doubao-seed-1-8-251228',
     },
     contextWindowTokens: 256_000,
     description:
       'Doubao-Seed-1.8 有着更强的多模态理解能力和 Agent 能力,支持文本/图片/视频输入与上下文缓存,可在复杂任务中提供更出色的表现。',
     displayName: 'Doubao Seed 1.8',
+    enabled: true,
     id: 'doubao-seed-1.8',
     maxOutput: 64_000,
     pricing: {
@@ -150,7 +150,6 @@ const ProviderConfig = memo<ProviderConfigProps>(
       enabled,
       isLoading,
       configUpdating,
-      isFetchOnClient,
       enableResponseApi,
       isProviderEndpointNotEmpty,
       isProviderApiKeyNotEmpty,
@@ -160,7 +159,6 @@ const ProviderConfig = memo<ProviderConfigProps>(
       aiProviderSelectors.isProviderEnabled(id)(s),
       aiProviderSelectors.isAiProviderConfigLoading(id)(s),
       aiProviderSelectors.isProviderConfigUpdating(id)(s),
-      aiProviderSelectors.isProviderFetchOnClient(id)(s),
       aiProviderSelectors.isProviderEnableResponseApi(id)(s),
       aiProviderSelectors.isActiveProviderEndpointNotEmpty(s),
       aiProviderSelectors.isActiveProviderApiKeyNotEmpty(s),
@@ -301,11 +299,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
         (showEndpoint && isProviderEndpointNotEmpty) ||
         (showApiKey && isProviderApiKeyNotEmpty));
     const clientFetchItem = showClientFetch && {
-      children: isLoading ? (
-        <SkeletonSwitch />
-      ) : (
-        <Switch checked={isFetchOnClient} disabled={configUpdating} />
-      ),
+      children: isLoading ? <SkeletonSwitch /> : <Switch loading={configUpdating} />,
       desc: t('providerModels.config.fetchOnClient.desc'),
       label: t('providerModels.config.fetchOnClient.title'),
       minWidth: undefined,
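The hunk above collapses the manual skeleton/checked/disabled handling for the client-fetch toggle into a single expression driven by the Switch's own pending state. A minimal sketch of the resulting pattern, assuming the Switch here forwards antd's `loading` prop (the wrapper component and placeholder names below are illustrative, not from the diff):

```tsx
import { Switch } from 'antd';

// Stand-in for the project's SkeletonSwitch placeholder; the name is assumed.
const SkeletonPlaceholder = () => <span>…</span>;

// While the provider config is still loading, render the skeleton; once loaded, the Switch
// shows a spinner whenever a config update is in flight, instead of being disabled and fed
// an explicit `checked` value.
export const ClientFetchSwitch = ({
  configUpdating,
  isLoading,
}: {
  configUpdating: boolean;
  isLoading: boolean;
}) => (isLoading ? <SkeletonPlaceholder /> : <Switch loading={configUpdating} />);
```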
@@ -14,15 +14,15 @@ export interface ResolvedContext {
 }

 /**
- *
+ * Resolve conversation context
  *
- *
- *
+ * Resolves agentId to sessionId (if agentId is provided)
+ * Priority: agentId > sessionId
  *
- * @param input -
- * @param db -
- * @param userId -
- * @returns
+ * @param input - Input context parameters
+ * @param db - Database instance
+ * @param userId - User ID
+ * @returns Resolved context with sessionId resolved from agentId
  */
 export const resolveContext = async (
   input: ConversationContextInput,
@@ -31,7 +31,7 @@ export const resolveContext = async (
 ): Promise<ResolvedContext> => {
   let resolvedSessionId: string | null = input.sessionId ?? null;

-  //
+  // If agentId is provided, prioritize looking up the corresponding sessionId from agentsToSessions table
   if (input.agentId) {
     const [relation] = await db
       .select({ sessionId: agentsToSessions.sessionId })
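The restored doc comment describes the helper's behaviour: an `agentId` is resolved to its `sessionId` through the agents↔sessions join table, and takes priority over a `sessionId` passed directly. A hedged sketch of that lookup (not the actual lobe-chat source; the schema import path, the `userId` filter, and the loose `db` typing are assumptions):

```ts
import { and, eq } from 'drizzle-orm';

import { agentsToSessions } from '@/database/schemas'; // assumed path

export const resolveSessionIdSketch = async (
  input: { agentId?: string | null; sessionId?: string | null },
  db: any, // assumed: a Drizzle database instance
  userId: string,
): Promise<string | null> => {
  let resolvedSessionId: string | null = input.sessionId ?? null;

  // agentId takes priority over sessionId
  if (input.agentId) {
    const [relation] = await db
      .select({ sessionId: agentsToSessions.sessionId })
      .from(agentsToSessions)
      .where(and(eq(agentsToSessions.agentId, input.agentId), eq(agentsToSessions.userId, userId)))
      .limit(1);

    if (relation?.sessionId) resolvedSessionId = relation.sessionId;
  }

  return resolvedSessionId;
};
```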
@@ -213,7 +213,7 @@ export const agentRouter = router({

       return [
         ...files
-          //
+          // Filter out all images
           .filter((file) => !file.fileType.startsWith('image'))
           .map((file) => ({
             enabled: knowledge.files.some((item) => item.id === file.id),
@@ -13,7 +13,7 @@ import type { WorkflowContext } from '@/server/services/comfyui/types';
 // Other RuntimeImageGenParams fields are passed through automatically
 const ComfyUIParamsSchema = z
   .object({
-    prompt: z.string(), //
+    prompt: z.string(), // Only validate required fields
   })
   .passthrough();

@@ -97,9 +97,9 @@ export const imageRouter = router({

     log('Starting image creation process, input: %O', input);

-    //
+    // Normalize reference image addresses, store S3 keys uniformly (avoid storing expiring presigned URLs in database)
     let configForDatabase = { ...params };
-    // 1)
+    // 1) Process multiple images in imageUrls
     if (Array.isArray(params.imageUrls) && params.imageUrls.length > 0) {
       log('Converting imageUrls to S3 keys for database storage: %O', params.imageUrls);
       try {
@@ -119,7 +119,7 @@ export const imageRouter = router({
         log('Keeping original imageUrls due to conversion error');
       }
     }
-    // 2)
+    // 2) Process single image in imageUrl
     if (typeof params.imageUrl === 'string' && params.imageUrl) {
       try {
         const key = fileService.getKeyFromFullUrl(params.imageUrl);
@@ -127,11 +127,11 @@ export const imageRouter = router({
         configForDatabase = { ...configForDatabase, imageUrl: key };
       } catch (error) {
         log('Error converting imageUrl to key: %O', error);
-        //
+        // Keep original value if conversion fails
       }
     }

-    //
+    // Defensive check: ensure no full URLs enter the database
     validateNoUrlsInConfig(configForDatabase, 'configForDatabase');

     const chargeResult = await chargeBeforeGenerate({
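The comments restored in these hunks describe normalising reference-image addresses to S3 object keys before persisting them, so the database never stores a presigned URL that will expire. A rough sketch of that idea (the real `fileService.getKeyFromFullUrl` is not shown in this diff; the URL handling below is an assumption):

```ts
// Illustrative only: reduce a (possibly presigned) S3 URL to a stable object key.
const getKeyFromFullUrlSketch = (fullUrl: string): string => {
  const url = new URL(fullUrl);
  // The presigned signature lives in the query string; the key is the path without the leading slash.
  return decodeURIComponent(url.pathname.replace(/^\//, ''));
};

// Usage: persist keys, not expiring URLs.
const storedImageUrl = getKeyFromFullUrlSketch(
  'https://bucket.s3.amazonaws.com/generations/abc.png?X-Amz-Signature=deadbeef',
);
// storedImageUrl === 'generations/abc.png'
```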
@@ -148,11 +148,11 @@ export const imageRouter = router({
       return chargeResult;
     }

-    //
+    // Step 1: Atomically create all database records in a transaction
     const { batch: createdBatch, generationsWithTasks } = await serverDB.transaction(async (tx) => {
       log('Starting database transaction for image generation');

-      // 1.
+      // 1. Create generationBatch
       const newBatch: NewGenerationBatch = {
         config: configForDatabase,
         generationTopicId,
@@ -161,13 +161,13 @@ export const imageRouter = router({
         prompt: params.prompt,
         provider,
         userId,
-        width: params.width, //
+        width: params.width, // Use converted config for database storage
       };
       log('Creating generation batch: %O', newBatch);
       const [batch] = await tx.insert(generationBatches).values(newBatch).returning();
       log('Generation batch created successfully: %s', batch.id);

-      // 2.
+      // 2. Create 4 generations (fixed at 4 images for phase one)
       const seeds =
         'seed' in params
           ? generateUniqueSeeds(imageNum)
@@ -187,11 +187,11 @@ export const imageRouter = router({
         createdGenerations.map((g) => g.id),
       );

-      // 3.
+      // 3. Concurrently create asyncTask for each generation (within transaction)
       log('Creating async tasks for generations');
       const generationsWithTasks = await Promise.all(
         createdGenerations.map(async (generation) => {
-          //
+          // Create asyncTask directly in transaction
           const [createdAsyncTask] = await tx
             .insert(asyncTasks)
             .values({
@@ -204,7 +204,7 @@ export const imageRouter = router({
           const asyncTaskId = createdAsyncTask.id;
           log('Created async task %s for generation %s', asyncTaskId, generation.id);

-          //
+          // Update generation's asyncTaskId
           await tx
             .update(generations)
             .set({ asyncTaskId })
@@ -259,7 +259,7 @@ export const imageRouter = router({
       console.error('[createImage] Failed to process async tasks:', e);
       log('Failed to process async tasks: %O', e);

-      //
+      // If overall failure occurs, update all task statuses to failed
      try {
        await Promise.allSettled(
          generationsWithTasks.map(({ asyncTaskId }) =>
@@ -36,7 +36,7 @@ export const klavisRouter = router({
     .mutation(async ({ input, ctx }) => {
       const { serverName, userId, identifier } = input;

-      //
+      // Create a single server instance
       const response = await ctx.klavisClient.mcpServer.createServerInstance({
         serverName: serverName as any,
         userId,
@@ -44,11 +44,11 @@ export const klavisRouter = router({

       const { serverUrl, instanceId, oauthUrl } = response;

-      //
+      // Get the tool list for this server
       const toolsResponse = await ctx.klavisClient.mcpServer.getTools(serverName as any);
       const tools = toolsResponse.tools || [];

-      //
+      // Save to database using the provided identifier (format: lowercase, spaces replaced with hyphens)
       const manifest: LobeChatPluginManifest = {
         api: tools.map((tool: any) => ({
           description: tool.description || '',
@@ -64,8 +64,8 @@ export const klavisRouter = router({
         type: 'default',
       };

-      //
-      const isAuthenticated = !oauthUrl; //
+      // Save to database with oauthUrl and isAuthenticated status
+      const isAuthenticated = !oauthUrl; // If there's no oauthUrl, authentication is not required or already authenticated
       await ctx.pluginModel.create({
         customParams: {
           klavis: {
@@ -104,10 +104,10 @@ export const klavisRouter = router({
       }),
     )
     .mutation(async ({ input, ctx }) => {
-      //
+      // Call Klavis API to delete server instance
       await ctx.klavisClient.mcpServer.deleteServerInstance(input.instanceId);

-      //
+      // Delete from database (using identifier)
       await ctx.pluginModel.delete(input.identifier);

       return { success: true };
@@ -200,10 +200,10 @@ export const klavisRouter = router({
       const { identifier, serverName, serverUrl, instanceId, tools, isAuthenticated, oauthUrl } =
         input;

-      //
+      // Get existing plugin (using identifier)
       const existingPlugin = await ctx.pluginModel.findById(identifier);

-      //
+      // Build manifest containing all tools
       const manifest: LobeChatPluginManifest = {
         api: tools.map((tool) => ({
           description: tool.description || '',
@@ -229,7 +229,7 @@ export const klavisRouter = router({
         },
       };

-      //
+      // Update or create plugin
       if (existingPlugin) {
         await ctx.pluginModel.update(identifier, { customParams, manifest });
       } else {
@@ -559,11 +559,11 @@ export const marketRouter = router({
       log('get access token, expiresIn value:', expiresIn);
       log('expiresIn type:', typeof expiresIn);

-      const expirationTime = new Date(Date.now() + (expiresIn - 60) * 1000); //
+      const expirationTime = new Date(Date.now() + (expiresIn - 60) * 1000); // Expire 60 seconds early

       log('expirationTime:', expirationTime.toISOString());

-      //
+      // Set HTTP-Only Cookie to store the actual access token
       const tokenCookie = serialize('mp_token', accessToken, {
         expires: expirationTime,
         httpOnly: true,
@@ -572,7 +572,7 @@ export const marketRouter = router({
         secure: process.env.NODE_ENV === 'production',
       });

-      //
+      // Set client-readable status marker cookie (without actual token)
       const statusCookie = serialize('mp_token_status', 'active', {
         expires: expirationTime,
         httpOnly: false,
@@ -581,7 +581,7 @@ export const marketRouter = router({
         secure: process.env.NODE_ENV === 'production',
       });

-      //
+      // Set Set-Cookie header via context's resHeaders
       ctx.resHeaders?.append('Set-Cookie', tokenCookie);
       ctx.resHeaders?.append('Set-Cookie', statusCookie);

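The comments restored above spell out a dual-cookie pattern: the real access token lives in an HTTP-only cookie, while a second, client-readable cookie only marks that a token exists. A minimal sketch of that pattern with the `cookie` package (option values beyond those visible in the diff are assumptions):

```ts
import { serialize } from 'cookie';

// Build both Set-Cookie values for a freshly obtained marketplace access token.
const buildMarketTokenCookies = (accessToken: string, expiresInSeconds: number) => {
  // Expire 60 seconds early so the client never presents a token the server already rejects.
  const expires = new Date(Date.now() + (expiresInSeconds - 60) * 1000);
  const common = { expires, path: '/', sameSite: 'lax' as const, secure: true };

  return [
    // Actual token: HTTP-only, invisible to client-side JavaScript.
    serialize('mp_token', accessToken, { ...common, httpOnly: true }),
    // Status marker: readable by the UI so it knows a session exists, but carries no secret.
    serialize('mp_token_status', 'active', { ...common, httpOnly: false }),
  ];
};

// Usage (assumed): for (const c of buildMarketTokenCookies(token, expiresIn)) resHeaders.append('Set-Cookie', c);
```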
@@ -650,7 +650,7 @@ export const marketRouter = router({
         return { success: true };
       } catch (error) {
         console.error('Error reporting call: %O', error);
-        //
+        // Don't throw error, as reporting failure should not affect main flow
         return { success: false };
       }
     }),
@@ -678,7 +678,7 @@ export const marketRouter = router({
         return { success: true };
       } catch (error) {
         log('Error reporting MCP installation result: %O', error);
-        //
+        // Don't throw error, as reporting failure should not affect main flow
         return { success: false };
       }
     }),
@@ -75,13 +75,13 @@ export const messageRouter = router({
   createMessage: messageProcedure
     .input(CreateNewMessageParamsSchema)
     .mutation(async ({ input, ctx }) => {
-      //
+      // If there's no agentId but has sessionId, resolve agentId from sessionId
       let agentId = input.agentId;
       if (!agentId && input.sessionId) {
         agentId = (await resolveAgentIdFromSession(input.sessionId, ctx.serverDB, ctx.userId))!;
       }

-      //
+      // Create message with the resolved agentId
       return ctx.messageService.createMessage({ ...input, agentId } as any);
     }),

@@ -65,7 +65,7 @@ export const pluginRouter = router({
     return data.identifier;
   }),

-  // TODO:
+  // TODO: In the future, this method also needs to use authedProcedure
   getPlugins: publicProcedure.query(async ({ ctx }): Promise<LobeTool[]> => {
     if (!ctx.userId) return [];

@@ -251,7 +251,7 @@ export const ragEvalRouter = router({
       const isSuccess = records.every((record) => record.status === EvalEvaluationStatus.Success);

       if (isSuccess) {
-        //
+        // Upload results to S3

         const evalRecords = records.map((record) => ({
           question: record.question,
@@ -265,7 +265,7 @@ export const ragEvalRouter = router({

         await ctx.fileService.uploadContent(path, JSONL.stringify(evalRecords));

-        //
+        // Save data
         await ctx.evaluationModel.update(input.id, {
           status: EvalEvaluationStatus.Success,
           evalRecordsUrl: await ctx.fileService.getFullFileUrl(path),
@@ -49,7 +49,7 @@ export const topicRouter = router({
       ),
     )
     .mutation(async ({ input, ctx }): Promise<BatchTaskResult> => {
-      //
+      // Resolve sessionId for each topic
       const resolvedTopics = await Promise.all(
         input.map(async (item) => {
           const { agentId, ...rest } = item;
@@ -162,7 +162,7 @@ export const topicRouter = router({
       return { items: result.items, total: result.total };
     }

-    //
+    // If sessionId is provided but no agentId, need to reverse lookup agentId
     let effectiveAgentId = rest.agentId;
     if (!effectiveAgentId && sessionId) {
       effectiveAgentId = await resolveAgentIdFromSession(sessionId, ctx.serverDB, ctx.userId);
@@ -430,7 +430,7 @@ export const topicRouter = router({
     .mutation(async ({ input, ctx }) => {
       const { agentId, ...restValue } = input.value;

-      //
+      // If agentId is provided, resolve to sessionId
      let resolvedSessionId = restValue.sessionId;
      if (agentId && !resolvedSessionId) {
        const resolved = await resolveContext({ agentId }, ctx.serverDB, ctx.userId);
@@ -150,7 +150,7 @@ export const userRouter = router({
       firstName: state.firstName,
       fullName: state.fullName,

-      //
+      // Has conversation if there are messages or has created any assistant
       hasConversation: hasAnyMessages || hasExtraSession,

       interests: state.interests,
@@ -190,40 +190,40 @@ export const userRouter = router({
   }),

   updateAvatar: userProcedure.input(z.string()).mutation(async ({ ctx, input }) => {
-    //
+    // If it's Base64 data, need to upload to S3
     if (input.startsWith('data:image')) {
       try {
-        //
+        // Extract mimeType, e.g., "image/png"
         const prefix = 'data:';
         const semicolonIndex = input.indexOf(';');
         const mimeType =
           semicolonIndex !== -1 ? input.slice(prefix.length, semicolonIndex) : 'image/png';
         const fileType = mimeType.split('/')[1];

-        //
+        // Split string to get the Base64 part
         const commaIndex = input.indexOf(',');
         if (commaIndex === -1) {
           throw new Error('Invalid Base64 data');
         }
         const base64Data = input.slice(commaIndex + 1);

-        //
+        // Create S3 client
         const s3 = new FileS3();

-        //
-        //
+        // Use UUID to generate unique filename to prevent caching issues
+        // Get old avatar URL for later deletion
         const userState = await ctx.userModel.getUserState(KeyVaultsGateKeeper.getUserKeyVaults);
         const oldAvatarUrl = userState.avatar;

         const fileName = `${uuidv4()}.${fileType}`;
         const filePath = `user/avatar/${ctx.userId}/${fileName}`;

-        //
+        // Convert Base64 data to Buffer and upload to S3
         const buffer = Buffer.from(base64Data, 'base64');

         await s3.uploadBuffer(filePath, buffer, mimeType);

-        //
+        // Delete old avatar
         if (oldAvatarUrl && oldAvatarUrl.startsWith('/webapi/')) {
           const oldFilePath = oldAvatarUrl.replace('/webapi/', '');
           await s3.deleteFile(oldFilePath);
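The restored comments in the updateAvatar hunk walk through parsing a `data:image/...;base64,...` avatar into a mime type, a UUID-based object key, and a Buffer for S3. A compact sketch of that parsing step (helper name and key layout are illustrative, not the router's actual code):

```ts
import { randomUUID } from 'node:crypto';

export const parseAvatarDataUrl = (input: string, userId: string) => {
  const semicolonIndex = input.indexOf(';');
  const commaIndex = input.indexOf(',');
  if (!input.startsWith('data:image') || commaIndex === -1) throw new Error('Invalid Base64 data');

  // "data:image/png;base64,..." → mimeType "image/png", extension "png"
  const mimeType =
    semicolonIndex !== -1 ? input.slice('data:'.length, semicolonIndex) : 'image/png';
  const fileType = mimeType.split('/')[1];

  // A fresh UUID per upload avoids serving a stale cached avatar after the file changes.
  const filePath = `user/avatar/${userId}/${randomUUID()}.${fileType}`;
  const buffer = Buffer.from(input.slice(commaIndex + 1), 'base64');

  return { buffer, filePath, mimeType };
};

// Usage (assumed): const { buffer, filePath, mimeType } = parseAvatarDataUrl(input, ctx.userId);
//                  await s3.uploadBuffer(filePath, buffer, mimeType);
```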
@@ -239,7 +239,7 @@ export const userRouter = router({
       }
     }

-    //
+    // If it's not Base64 data, directly use URL to update user avatar
     return ctx.userModel.updateUser({ avatar: input });
   }),

@@ -305,11 +305,11 @@ export const userMemoriesRouter = router({
     }
   }),

-  // REVIEW
-  // REVIEW
-  // REVIEW
-  //
-  //
+  // REVIEW: Extract memories directly from current topic
+  // REVIEW: We need a function implementation that can be triggered both by cron and manually by users for "daily/weekly/periodic" memory extraction/generation
+  // REVIEW: Scheduled task
+  // Don't use tRPC, use server/service directly
+  // Reference: https://github.com/lobehub/lobe-chat-cloud/blob/886ff2fcd44b7b00a3aa8906f84914a6dcaa1815/src/app/(backend)/cron/reset-budgets/route.ts#L214
   reEmbedMemories: memoryProcedure
     .input(reEmbedInputSchema.optional())
     .mutation(async ({ ctx, input }) => {
@@ -740,7 +740,7 @@ export const userMemoriesRouter = router({
     }
   }),

-  // REVIEW:
+  // REVIEW: Need to implement tool memory api
   toolAddContextMemory: memoryProcedure
     .input(ContextMemoryItemSchema)
     .mutation(async ({ input, ctx }) => {