@lobehub/lobehub 2.0.0-next.111 → 2.0.0-next.112
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +33 -0
- package/changelog/v1.json +9 -0
- package/package.json +1 -1
- package/packages/database/src/models/agent.ts +8 -4
- package/packages/database/src/schemas/file.ts +5 -10
- package/packages/model-bank/src/aiModels/qwen.ts +41 -3
- package/packages/model-runtime/src/providers/qwen/index.ts +9 -3
- package/src/server/routers/lambda/chunk.ts +6 -1
- package/src/store/chat/slices/builtinTool/actions/knowledgeBase.ts +0 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,39 @@
 
 # Changelog
 
+## [Version 2.0.0-next.112](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.111...v2.0.0-next.112)
+
+<sup>Released on **2025-11-24**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Optimize files schema definition.
+
+#### 💄 Styles
+
+- **misc**: Add Kimi K2 Thinking to Qwen Provider.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Optimize files schema definition, closes [#10403](https://github.com/lobehub/lobe-chat/issues/10403) ([cf28c87](https://github.com/lobehub/lobe-chat/commit/cf28c87))
+
+#### Styles
+
+- **misc**: Add Kimi K2 Thinking to Qwen Provider, closes [#10287](https://github.com/lobehub/lobe-chat/issues/10287) ([bd2e838](https://github.com/lobehub/lobe-chat/commit/bd2e838))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ## [Version 2.0.0-next.111](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.110...v2.0.0-next.111)
 
 <sup>Released on **2025-11-24**</sup>
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.111",
+  "version": "2.0.0-next.112",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/packages/database/src/models/agent.ts
CHANGED

@@ -26,7 +26,12 @@ export class AgentModel {
     const knowledge = await this.getAgentAssignedKnowledge(id);
 
     // Fetch document content for enabled files
-    const enabledFileIds = knowledge.files
+    const enabledFileIds = knowledge.files
+      .filter((f) => f.enabled)
+      .map((f) => f.id)
+      .filter((id) => id !== undefined);
+    let files: Array<(typeof knowledge.files)[number] & { content?: string | null }> =
+      knowledge.files;
 
     if (enabledFileIds.length > 0) {
       const documentsData = await this.db.query.documents.findMany({
@@ -34,14 +39,13 @@ export class AgentModel {
       });
 
       const documentMap = new Map(documentsData.map((doc) => [doc.fileId, doc.content]));
-
-      knowledge.files = knowledge.files.map((file) => ({
+      files = knowledge.files.map((file) => ({
         ...file,
         content: file.enabled && file.id ? documentMap.get(file.id) : undefined,
       }));
     }
 
-    return { ...agent, ...knowledge };
+    return { ...agent, ...knowledge, files };
   };
 
   getAgentAssignedKnowledge = async (id: string) => {
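The agent.ts change stops mutating `knowledge.files` in place and instead builds a separately typed `files` array whose items may carry an optional `content` field, which is then returned alongside the rest of the knowledge. A minimal standalone sketch of that pattern, with hypothetical `KnowledgeFile` / `DocumentRow` shapes that are not the repository's types:

```ts
// Hypothetical shapes, for illustration only.
interface KnowledgeFile {
  enabled?: boolean | null;
  id?: string;
  name: string;
}

interface DocumentRow {
  content: string | null;
  fileId: string;
}

// Widen the file items with an optional `content` field instead of
// mutating the original array, mirroring the diff above.
const attachDocumentContent = (
  files: KnowledgeFile[],
  documents: DocumentRow[],
): Array<KnowledgeFile & { content?: string | null }> => {
  const documentMap = new Map(documents.map((doc) => [doc.fileId, doc.content]));

  return files.map((file) => ({
    ...file,
    content: file.enabled && file.id ? documentMap.get(file.id) : undefined,
  }));
};
```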
package/packages/database/src/schemas/file.ts
CHANGED

@@ -1,6 +1,7 @@
 /* eslint-disable sort-keys-fix/sort-keys-fix */
 import { isNotNull } from 'drizzle-orm';
 import {
+  AnyPgColumn,
   boolean,
   index,
   integer,
@@ -41,7 +42,6 @@ export type GlobalFileItem = typeof globalFiles.$inferSelect;
 /**
  * Documents table - Stores file content or web search results
  */
-// @ts-ignore
 export const documents = pgTable(
   'documents',
   {
@@ -72,15 +72,12 @@ export const documents = pgTable(
     source: text('source').notNull(), // File path or web URL
 
     // Associated file (optional)
-    //
+    // forward reference needs AnyPgColumn to avoid circular type inference
     // eslint-disable-next-line @typescript-eslint/no-use-before-define
-
-    // eslint-disable-next-line @typescript-eslint/no-use-before-define
-    fileId: text('file_id').references(() => files.id, { onDelete: 'set null' }),
+    fileId: text('file_id').references((): AnyPgColumn => files.id, { onDelete: 'set null' }),
 
     // Parent document (for folder hierarchy structure)
-
-    parentId: varchar('parent_id', { length: 255 }).references(() => documents.id, {
+    parentId: varchar('parent_id', { length: 255 }).references((): AnyPgColumn => documents.id, {
       onDelete: 'set null',
     }),
 
@@ -113,7 +110,6 @@ export type NewDocument = typeof documents.$inferInsert;
 export type DocumentItem = typeof documents.$inferSelect;
 export const insertDocumentSchema = createInsertSchema(documents);
 
-// @ts-ignore
 export const files = pgTable(
   'files',
   {
@@ -140,8 +136,7 @@ export const files = pgTable(
     source: text('source').$type<FileSource>(),
 
     // Parent Folder or Document
-
-    parentId: varchar('parent_id', { length: 255 }).references(() => documents.id, {
+    parentId: varchar('parent_id', { length: 255 }).references((): AnyPgColumn => documents.id, {
      onDelete: 'set null',
     }),
 
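The schema change replaces the two `// @ts-ignore` escapes with Drizzle's documented way of typing forward and self references: annotating the lazy `references` callback with `AnyPgColumn` so TypeScript does not fall into circular type inference. A minimal sketch of the same pattern on a hypothetical self-referencing `categories` table (not part of this package):

```ts
import { AnyPgColumn, integer, pgTable, serial, text } from 'drizzle-orm/pg-core';

// The table references itself, so the lazy callback is annotated with
// AnyPgColumn to break the circular type inference on `parentId`.
export const categories = pgTable('categories', {
  id: serial('id').primaryKey(),
  name: text('name').notNull(),
  parentId: integer('parent_id').references((): AnyPgColumn => categories.id, {
    onDelete: 'set null',
  }),
});
```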
package/packages/model-bank/src/aiModels/qwen.ts
CHANGED

@@ -3,6 +3,31 @@ import { AIChatModelCard, AIImageModelCard } from '../types/aiModel';
 // https://help.aliyun.com/zh/model-studio/models?spm=a2c4g.11186623
 
 const qwenChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 262_144,
+    description:
+      'kimi-k2-thinking模型是月之暗面提供的具有通用 Agentic能力和推理能力的思考模型,它擅长深度推理,并可通过多步工具调用,帮助解决各类难题。',
+    displayName: 'Kimi K2 Thinking',
+    id: 'kimi-k2-thinking',
+    maxOutput: 16_384,
+    organization: 'Qwen',
+    pricing: {
+      currency: 'CNY',
+      units: [
+        { name: 'textInput', rate: 4, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 16, strategy: 'fixed', unit: 'millionTokens' },
+      ],
+    },
+    releasedAt: '2025-11-10',
+    settings: {
+      extendParams: ['reasoningBudgetToken'],
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       reasoning: true,
@@ -109,6 +134,7 @@ const qwenChatModels: AIChatModelCard[] = [
   {
     abilities: {
       reasoning: true,
+      search: true,
     },
     contextWindowTokens: 131_072,
     description:
@@ -131,6 +157,7 @@ const qwenChatModels: AIChatModelCard[] = [
   {
     abilities: {
       reasoning: true,
+      search: true,
     },
     contextWindowTokens: 131_072,
     description: 'DeepSeek V3.1 模型为混合推理架构模型,同时支持思考模式与非思考模式。',
@@ -151,6 +178,7 @@ const qwenChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
+      functionCall: true,
       search: true,
     },
     contextWindowTokens: 131_072,
@@ -1231,6 +1259,7 @@ const qwenChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
+      reasoning: true,
       vision: true,
     },
     config: {
@@ -1239,7 +1268,7 @@ const qwenChatModels: AIChatModelCard[] = [
     contextWindowTokens: 65_536,
     description:
       'Qwen-Omni 模型能够接收文本、图片、音频、视频等多种模态的组合输入,并生成文本或语音形式的回复, 提供多种拟人音色,支持多语言和方言的语音输出,可应用于文本创作、视觉识别、语音助手等场景。',
-    displayName: '
+    displayName: 'Qwen3 Omni Flash',
     id: 'qwen3-omni-flash',
     maxOutput: 16_384,
     organization: 'Qwen',
@@ -1432,6 +1461,9 @@ const qwenChatModels: AIChatModelCard[] = [
         { name: 'textOutput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
       ],
     },
+    settings: {
+      extendParams: ['reasoningBudgetToken'],
+    },
     type: 'chat',
   },
   {
@@ -1473,7 +1505,7 @@ const qwenChatModels: AIChatModelCard[] = [
       ],
     },
     settings: {
-      extendParams: ['
+      extendParams: ['reasoningBudgetToken'],
     },
     type: 'chat',
   },
@@ -1517,7 +1549,7 @@ const qwenChatModels: AIChatModelCard[] = [
      ],
     },
     settings: {
-      extendParams: ['
+      extendParams: ['reasoningBudgetToken'],
     },
     type: 'chat',
   },
@@ -1976,7 +2008,9 @@ const qwenChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
+      functionCall: true,
       reasoning: true,
+      search: true,
     },
     contextWindowTokens: 131_072,
     description:
@@ -1996,6 +2030,10 @@ const qwenChatModels: AIChatModelCard[] = [
     type: 'chat',
   },
   {
+    abilities: {
+      functionCall: true,
+      search: true,
+    },
     contextWindowTokens: 65_536,
     description:
       'DeepSeek-V3 为自研 MoE 模型,671B 参数,激活 37B,在 14.8T token 上进行了预训练,在长文本、代码、数学、百科、中文能力上表现优秀。',
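The new Kimi K2 Thinking card prices input at 4 CNY and output at 16 CNY per million tokens through fixed-rate `pricing.units`. As a rough illustration of how such a unit list maps to a cost figure, here is a hypothetical helper (not part of model-bank) over the same shape:

```ts
// Hypothetical cost estimator over the `pricing.units` shape shown above.
interface PricingUnit {
  name: string; // e.g. 'textInput' or 'textOutput'
  rate: number; // price per `unit`, in the card's currency
  strategy: 'fixed';
  unit: 'millionTokens';
}

const estimateCost = (
  units: PricingUnit[],
  usage: { inputTokens: number; outputTokens: number },
): number => {
  const rateOf = (name: string) => units.find((u) => u.name === name)?.rate ?? 0;
  return (
    (usage.inputTokens / 1_000_000) * rateOf('textInput') +
    (usage.outputTokens / 1_000_000) * rateOf('textOutput')
  );
};

// With the kimi-k2-thinking rates (4 / 16 CNY per million tokens),
// 500k input + 100k output tokens comes to 2 + 1.6 = 3.6 CNY.
```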
package/packages/model-runtime/src/providers/qwen/index.ts
CHANGED

@@ -51,9 +51,15 @@ export const LobeQwenAI = createOpenAICompatibleRuntime({
               thinking_budget:
                 thinking?.budget_tokens === 0 ? 0 : thinking?.budget_tokens || undefined,
             }
-          : [
-
-
+          : [
+              'qwen3',
+              'qwen-turbo',
+              'qwen-plus',
+              'qwen-flash',
+              'deepseek-v3.1',
+              'deepseek-v3.2',
+              'glm',
+            ].some((keyword) => model.toLowerCase().includes(keyword))
             ? {
                 enable_thinking: thinking !== undefined ? thinking.type === 'enabled' : false,
                 thinking_budget:
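The runtime change expands the list of model-id keywords that opt into the `enable_thinking` / `thinking_budget` payload; the gate is a simple case-insensitive substring match. A standalone sketch of that check, based only on the lines visible in the diff:

```ts
// Keywords taken from the diff above; a model id matching any of them
// goes through the enable_thinking branch.
const THINKING_KEYWORDS = [
  'qwen3',
  'qwen-turbo',
  'qwen-plus',
  'qwen-flash',
  'deepseek-v3.1',
  'deepseek-v3.2',
  'glm',
];

const matchesThinkingKeyword = (model: string): boolean =>
  THINKING_KEYWORDS.some((keyword) => model.toLowerCase().includes(keyword));

// matchesThinkingKeyword('qwen-plus-latest')  -> true
// matchesThinkingKeyword('deepseek-v3.2-exp') -> true
// matchesThinkingKeyword('kimi-k2-thinking')  -> false
```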
package/src/server/routers/lambda/chunk.ts
CHANGED

@@ -162,7 +162,12 @@ export const chunkRouter = router({
       }
 
       // 2. Find existing parsed document
-      let document
+      let document:
+        | {
+            content: string | null;
+            metadata: Record<string, any> | null;
+          }
+        | undefined = await ctx.documentModel.findByFileId(fileId);
 
       // 3. If not exists, parse the file
       if (!document) {
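The chunk router change gives `document` an explicit union type so the variable keeps the shape the rest of the handler relies on, rather than whatever `findByFileId` happens to infer, presumably because it is reassigned inside the "parse the file" branch below. A small hedged illustration of that annotation pattern, with hypothetical `findByFileId` / `parseFile` stand-ins:

```ts
// Hypothetical stand-ins for the document lookup and the fallback parser.
type ParsedDocument = {
  content: string | null;
  metadata: Record<string, any> | null;
};

declare const findByFileId: (fileId: string) => Promise<ParsedDocument | undefined>;
declare const parseFile: (fileId: string) => Promise<ParsedDocument>;

const loadDocument = async (fileId: string): Promise<ParsedDocument> => {
  // Annotating the `let` keeps its type stable across the reassignment below.
  let document: ParsedDocument | undefined = await findByFileId(fileId);

  if (!document) {
    document = await parseFile(fileId);
  }

  return document;
};
```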
package/src/store/chat/slices/builtinTool/actions/knowledgeBase.ts
CHANGED

@@ -6,7 +6,6 @@ import { getAgentStoreState } from '@/store/agent/store';
 import { ChatStore } from '@/store/chat/store';
 import { KnowledgeBaseExecutionRuntime } from '@/tools/knowledge-base/ExecutionRuntime';
 
-
 const log = debug('lobe-store:builtin-tool:knowledge-base');
 
 export interface KnowledgeBaseAction {