@lobehub/lobehub 2.0.0-next.247 → 2.0.0-next.248
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +5 -0
- package/package.json +1 -1
- package/packages/utils/src/folderStructure.test.ts +310 -0
- package/src/app/(backend)/api/webhooks/memory-extraction/route.ts +2 -1
- package/src/app/(backend)/api/workflows/memory-user-memory/pipelines/chat-topic/process-user-topics/route.ts +5 -2
- package/src/app/(backend)/api/workflows/memory-user-memory/pipelines/chat-topic/process-users/route.ts +5 -2
- package/src/server/globalConfig/parseMemoryExtractionConfig.ts +12 -0
- package/src/server/routers/async/caller.ts +3 -3
- package/src/server/routers/async/image.ts +1 -1
- package/src/server/routers/async/ragEval.ts +3 -3
- package/src/server/routers/lambda/_helpers/resolveContext.ts +11 -11
- package/src/server/routers/lambda/_schema/context.ts +6 -6
- package/src/server/services/memory/userMemory/extract.ts +6 -6
- package/src/server/sitemap.ts +12 -12
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 2.0.0-next.248](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.247...v2.0.0-next.248)
+
+<sup>Released on **2026-01-09**</sup>
+
+#### ✨ Features
+
+- **userMemories**: Support to assign for extra headers when invoking upstash workflows.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **userMemories**: Support to assign for extra headers when invoking upstash workflows, closes [#11374](https://github.com/lobehub/lobe-chat/issues/11374) ([895e15e](https://github.com/lobehub/lobe-chat/commit/895e15e))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ## [Version 2.0.0-next.247](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.246...v2.0.0-next.247)
 
 <sup>Released on **2026-01-09**</sup>
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.247",
+  "version": "2.0.0-next.248",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",

package/packages/utils/src/folderStructure.test.ts
ADDED

@@ -0,0 +1,310 @@
+import { describe, expect, it } from 'vitest';
+
+import { buildFolderTree, sanitizeFolderName, topologicalSortFolders } from './folderStructure';
+
+describe('folderStructure', () => {
+  describe('buildFolderTree', () => {
+    it('should handle single files without folders', () => {
+      const files = [new File(['content1'], 'file1.txt'), new File(['content2'], 'file2.txt')];
+
+      const result = buildFolderTree(files);
+
+      expect(result.folders).toEqual({});
+      expect(result.filesByFolder['']).toHaveLength(2);
+      expect(result.filesByFolder[''][0].name).toBe('file1.txt');
+      expect(result.filesByFolder[''][1].name).toBe('file2.txt');
+    });
+
+    it('should build folder structure from files with webkitRelativePath', () => {
+      const file1 = new File(['content'], 'file.txt');
+      (file1 as any).webkitRelativePath = 'folder1/file.txt';
+
+      const file2 = new File(['content'], 'file2.txt');
+      (file2 as any).webkitRelativePath = 'folder1/file2.txt';
+
+      const result = buildFolderTree([file1, file2]);
+
+      expect(result.folders).toEqual({
+        folder1: {
+          name: 'folder1',
+          parent: null,
+        },
+      });
+
+      expect(result.filesByFolder['folder1']).toHaveLength(2);
+      expect(result.filesByFolder['folder1'][0].name).toBe('file.txt');
+      expect(result.filesByFolder['folder1'][1].name).toBe('file2.txt');
+    });
+
+    it('should handle nested folder structures', () => {
+      const file1 = new File(['content'], 'deep.txt');
+      (file1 as any).webkitRelativePath = 'folder1/subfolder1/subfolder2/deep.txt';
+
+      const result = buildFolderTree([file1]);
+
+      expect(result.folders).toEqual({
+        'folder1': {
+          name: 'folder1',
+          parent: null,
+        },
+        'folder1/subfolder1': {
+          name: 'subfolder1',
+          parent: 'folder1',
+        },
+        'folder1/subfolder1/subfolder2': {
+          name: 'subfolder2',
+          parent: 'folder1/subfolder1',
+        },
+      });
+
+      expect(result.filesByFolder['folder1/subfolder1/subfolder2']).toHaveLength(1);
+      expect(result.filesByFolder['folder1/subfolder1/subfolder2'][0].name).toBe('deep.txt');
+    });
+
+    it('should handle multiple files in different folders', () => {
+      const file1 = new File(['content1'], 'file1.txt');
+      (file1 as any).webkitRelativePath = 'folder1/file1.txt';
+
+      const file2 = new File(['content2'], 'file2.txt');
+      (file2 as any).webkitRelativePath = 'folder2/file2.txt';
+
+      const file3 = new File(['content3'], 'file3.txt');
+      (file3 as any).webkitRelativePath = 'folder1/subfolder/file3.txt';
+
+      const result = buildFolderTree([file1, file2, file3]);
+
+      expect(result.folders).toEqual({
+        'folder1': {
+          name: 'folder1',
+          parent: null,
+        },
+        'folder2': {
+          name: 'folder2',
+          parent: null,
+        },
+        'folder1/subfolder': {
+          name: 'subfolder',
+          parent: 'folder1',
+        },
+      });
+
+      expect(result.filesByFolder['folder1']).toHaveLength(1);
+      expect(result.filesByFolder['folder2']).toHaveLength(1);
+      expect(result.filesByFolder['folder1/subfolder']).toHaveLength(1);
+    });
+
+    it('should not duplicate folders when processing multiple files in same folder', () => {
+      const file1 = new File(['content1'], 'file1.txt');
+      (file1 as any).webkitRelativePath = 'shared/file1.txt';
+
+      const file2 = new File(['content2'], 'file2.txt');
+      (file2 as any).webkitRelativePath = 'shared/file2.txt';
+
+      const file3 = new File(['content3'], 'file3.txt');
+      (file3 as any).webkitRelativePath = 'shared/file3.txt';
+
+      const result = buildFolderTree([file1, file2, file3]);
+
+      // Should only have one 'shared' folder entry
+      expect(Object.keys(result.folders)).toEqual(['shared']);
+      expect(result.folders['shared']).toEqual({
+        name: 'shared',
+        parent: null,
+      });
+
+      // All three files should be in the same folder
+      expect(result.filesByFolder['shared']).toHaveLength(3);
+    });
+
+    it('should handle mixed single files and folder files', () => {
+      const file1 = new File(['content1'], 'root.txt');
+
+      const file2 = new File(['content2'], 'nested.txt');
+      (file2 as any).webkitRelativePath = 'folder/nested.txt';
+
+      const result = buildFolderTree([file1, file2]);
+
+      expect(result.folders).toEqual({
+        folder: {
+          name: 'folder',
+          parent: null,
+        },
+      });
+
+      expect(result.filesByFolder['']).toHaveLength(1);
+      expect(result.filesByFolder[''][0].name).toBe('root.txt');
+      expect(result.filesByFolder['folder']).toHaveLength(1);
+      expect(result.filesByFolder['folder'][0].name).toBe('nested.txt');
+    });
+
+    it('should handle empty file array', () => {
+      const result = buildFolderTree([]);
+
+      expect(result.folders).toEqual({});
+      expect(result.filesByFolder).toEqual({});
+    });
+
+    it('should handle files with special characters in path', () => {
+      const file = new File(['content'], 'file.txt');
+      (file as any).webkitRelativePath = 'my-folder/sub_folder/file.txt';
+
+      const result = buildFolderTree([file]);
+
+      expect(result.folders).toEqual({
+        'my-folder': {
+          name: 'my-folder',
+          parent: null,
+        },
+        'my-folder/sub_folder': {
+          name: 'sub_folder',
+          parent: 'my-folder',
+        },
+      });
+    });
+  });
+
+  describe('topologicalSortFolders', () => {
+    it('should sort folders by depth (shallowest first)', () => {
+      const folders = {
+        'a/b/c': { name: 'c', parent: 'a/b' },
+        'a': { name: 'a', parent: null },
+        'a/b': { name: 'b', parent: 'a' },
+      };
+
+      const result = topologicalSortFolders(folders);
+
+      expect(result).toEqual(['a', 'a/b', 'a/b/c']);
+    });
+
+    it('should sort multiple root-level folders correctly', () => {
+      const folders = {
+        'folder2/sub': { name: 'sub', parent: 'folder2' },
+        'folder1': { name: 'folder1', parent: null },
+        'folder2': { name: 'folder2', parent: null },
+      };
+
+      const result = topologicalSortFolders(folders);
+
+      // Root folders should come before nested folders
+      expect(result[0]).toBe('folder1');
+      expect(result[1]).toBe('folder2');
+      expect(result[2]).toBe('folder2/sub');
+    });
+
+    it('should handle complex nested structures', () => {
+      const folders = {
+        'a/b/c/d': { name: 'd', parent: 'a/b/c' },
+        'x/y': { name: 'y', parent: 'x' },
+        'a': { name: 'a', parent: null },
+        'x': { name: 'x', parent: null },
+        'a/b': { name: 'b', parent: 'a' },
+        'a/b/c': { name: 'c', parent: 'a/b' },
+      };
+
+      const result = topologicalSortFolders(folders);
+
+      // Check that parents always come before children
+      expect(result.indexOf('a')).toBeLessThan(result.indexOf('a/b'));
+      expect(result.indexOf('a/b')).toBeLessThan(result.indexOf('a/b/c'));
+      expect(result.indexOf('a/b/c')).toBeLessThan(result.indexOf('a/b/c/d'));
+      expect(result.indexOf('x')).toBeLessThan(result.indexOf('x/y'));
+    });
+
+    it('should handle single folder', () => {
+      const folders = {
+        folder: { name: 'folder', parent: null },
+      };
+
+      const result = topologicalSortFolders(folders);
+
+      expect(result).toEqual(['folder']);
+    });
+
+    it('should handle empty folder object', () => {
+      const result = topologicalSortFolders({});
+
+      expect(result).toEqual([]);
+    });
+
+    it('should preserve order for folders at same depth', () => {
+      const folders = {
+        'a/b': { name: 'b', parent: 'a' },
+        'a/c': { name: 'c', parent: 'a' },
+        'a/d': { name: 'd', parent: 'a' },
+        'a': { name: 'a', parent: null },
+      };
+
+      const result = topologicalSortFolders(folders);
+
+      // Parent should be first
+      expect(result[0]).toBe('a');
+
+      // Siblings (same depth) should maintain their relative order
+      const siblings = result.slice(1);
+      expect(siblings).toHaveLength(3);
+      expect(siblings).toContain('a/b');
+      expect(siblings).toContain('a/c');
+      expect(siblings).toContain('a/d');
+    });
+  });
+
+  describe('sanitizeFolderName', () => {
+    it('should keep valid alphanumeric characters', () => {
+      expect(sanitizeFolderName('MyFolder123')).toBe('MyFolder123');
+    });
+
+    it('should keep spaces', () => {
+      expect(sanitizeFolderName('My Folder Name')).toBe('My Folder Name');
+    });
+
+    it('should keep hyphens and underscores', () => {
+      expect(sanitizeFolderName('my-folder_name')).toBe('my-folder_name');
+    });
+
+    it('should replace invalid characters with underscores', () => {
+      expect(sanitizeFolderName('folder/name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder\\name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder:name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder*name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder?name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder"name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder<name>')).toBe('folder_name_');
+      expect(sanitizeFolderName('folder|name')).toBe('folder_name');
+    });
+
+    it('should replace control characters with underscores', () => {
+      expect(sanitizeFolderName('folder\x00name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder\x01name')).toBe('folder_name');
+      expect(sanitizeFolderName('folder\x1Fname')).toBe('folder_name');
+    });
+
+    it('should trim whitespace from start and end', () => {
+      expect(sanitizeFolderName(' folder ')).toBe('folder');
+      expect(sanitizeFolderName('\tfolder\t')).toBe('_folder_');
+    });
+
+    it('should handle multiple invalid characters', () => {
+      expect(sanitizeFolderName('my/folder\\with:many*invalid?chars')).toBe(
+        'my_folder_with_many_invalid_chars',
+      );
+    });
+
+    it('should handle empty string', () => {
+      expect(sanitizeFolderName('')).toBe('');
+    });
+
+    it('should handle string with only invalid characters', () => {
+      expect(sanitizeFolderName('/:*?<>|')).toBe('_______');
+    });
+
+    it('should handle unicode characters correctly', () => {
+      expect(sanitizeFolderName('我的文件夹')).toBe('我的文件夹');
+      expect(sanitizeFolderName('папка')).toBe('папка');
+      expect(sanitizeFolderName('フォルダ')).toBe('フォルダ');
+    });
+
+    it('should handle mixed valid and invalid characters', () => {
+      expect(sanitizeFolderName('Project-2024_draft/final?')).toBe('Project-2024_draft_final_');
+    });
+  });
+});
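
For orientation, the suite above fully pins down the sanitizer's contract: Windows-reserved characters (`\ / : * ? " < > |`) and ASCII control characters become underscores, trimming happens after replacement (so a tab survives as an underscore while plain spaces are stripped), and Unicode passes through untouched. A minimal sketch consistent with those tests; the shipped `folderStructure.ts` is not part of this diff, so this is an inference, not the actual implementation:

```ts
// Inferred from folderStructure.test.ts; not the shipped implementation.
// Replace first, then trim: '\tfolder\t' -> '_folder_' (tab is a control char),
// while ' folder ' -> 'folder' (plain spaces are kept by the regex, then trimmed).
export const sanitizeFolderName = (name: string): string =>
  name.replace(/[\\/:*?"<>|\u0000-\u001F]/g, '_').trim();
```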

package/src/app/(backend)/api/webhooks/memory-extraction/route.ts
CHANGED

@@ -10,7 +10,7 @@ import {
 } from '@/server/services/memory/userMemory/extract';
 
 export const POST = async (req: Request) => {
-  const { webhookHeaders } = parseMemoryExtractionConfig();
+  const { webhookHeaders, upstashWorkflowExtraHeaders } = parseMemoryExtractionConfig();
 
   if (webhookHeaders && Object.keys(webhookHeaders).length > 0) {
     for (const [key, value] of Object.entries(webhookHeaders)) {
@@ -43,6 +43,7 @@ export const POST = async (req: Request) => {
   if (params.mode === 'workflow') {
     const { workflowRunId } = await MemoryExtractionWorkflowService.triggerProcessUsers(
       buildWorkflowPayloadInput(params),
+      { extraHeaders: upstashWorkflowExtraHeaders },
     );
     return NextResponse.json(
       { message: 'Memory extraction scheduled via workflow.', workflowRunId },

package/src/app/(backend)/api/workflows/memory-user-memory/pipelines/chat-topic/process-user-topics/route.ts
CHANGED

@@ -9,11 +9,14 @@ import {
 } from '@/server/services/memory/userMemory/extract';
 import { forEachBatchSequential } from '@/server/services/memory/userMemory/topicBatching';
 import { MemorySourceType } from '@lobechat/types';
+import { parseMemoryExtractionConfig } from '@/server/globalConfig/parseMemoryExtractionConfig';
 
 const TOPIC_PAGE_SIZE = 50;
 const TOPIC_BATCH_SIZE = 4;
 
 export const { POST } = serve<MemoryExtractionPayloadInput>(async (context) => {
+  const { upstashWorkflowExtraHeaders } = parseMemoryExtractionConfig();
+
   const params = normalizeMemoryExtractionPayload(context.requestPayload || {});
   if (!params.userIds.length) {
     return { message: 'No user ids provided for topic processing.' };
@@ -37,7 +40,7 @@ export const { POST } = serve<MemoryExtractionPayloadInput>(async (context) => {
       userId,
       userIds: [userId],
     }),
-  });
+  }, { extraHeaders: upstashWorkflowExtraHeaders });
 };
 
 for (const userId of params.userIds) {
@@ -99,7 +102,7 @@ export const { POST } = serve<MemoryExtractionPayloadInput>(async (context) => {
       topicIds,
       userId,
       userIds: [userId],
-    }),
+    }, { extraHeaders: upstashWorkflowExtraHeaders }),
   );
 });
 

package/src/app/(backend)/api/workflows/memory-user-memory/pipelines/chat-topic/process-users/route.ts
CHANGED

@@ -8,11 +8,14 @@ import {
   buildWorkflowPayloadInput,
   normalizeMemoryExtractionPayload,
 } from '@/server/services/memory/userMemory/extract';
+import { parseMemoryExtractionConfig } from '@/server/globalConfig/parseMemoryExtractionConfig';
 
 const USER_PAGE_SIZE = 50;
 const USER_BATCH_SIZE = 10;
 
 export const { POST } = serve<MemoryExtractionPayloadInput>(async (context) => {
+  const { upstashWorkflowExtraHeaders } = parseMemoryExtractionConfig();
+
   const params = normalizeMemoryExtractionPayload(context.requestPayload || {});
   if (params.sources.length === 0) {
     return { message: 'No sources provided, skip memory extraction.' };
@@ -49,7 +52,7 @@ export const { POST } = serve<MemoryExtractionPayloadInput>(async (context) => {
           topicCursor: undefined,
           userId: userIds[0],
           userIds,
-        }),
+        }, { extraHeaders: upstashWorkflowExtraHeaders }),
       ),
     ),
   );
@@ -61,7 +64,7 @@ export const { POST } = serve<MemoryExtractionPayloadInput>(async (context) => {
         ...params,
         userCursor: { createdAt: cursor.createdAt.toISOString(), id: cursor.id },
       }),
-    }),
+    }, { extraHeaders: upstashWorkflowExtraHeaders }),
   );
 }
 

package/src/server/globalConfig/parseMemoryExtractionConfig.ts
CHANGED

@@ -48,6 +48,7 @@ export interface MemoryExtractionPrivateConfig {
     region?: string;
     secretAccessKey?: string;
   };
+  upstashWorkflowExtraHeaders?: Record<string, string>;
   webhookHeaders?: Record<string, string>;
   whitelistUsers?: string[];
 }
@@ -190,6 +191,16 @@ export const parseMemoryExtractionConfig = (): MemoryExtractionPrivateConfig =>
     return acc;
   }, {});
 
+  const upstashWorkflowExtraHeaders = process.env.MEMORY_USER_MEMORY_WORKFLOW_EXTRA_HEADERS?.split(',')
+    .filter(Boolean)
+    .reduce<Record<string, string>>((acc, pair) => {
+      const [key, value] = pair.split('=').map((s) => s.trim());
+      if (key && value) {
+        acc[key] = value;
+      }
+      return acc;
+    }, {});
+
   return {
     agentGateKeeper,
     agentLayerExtractor,
@@ -197,6 +208,7 @@ export const parseMemoryExtractionConfig = (): MemoryExtractionPrivateConfig =>
     embedding,
     featureFlags,
     observabilityS3: extractorObservabilityS3,
+    upstashWorkflowExtraHeaders,
     webhookHeaders,
     whitelistUsers,
   };
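
The new `MEMORY_USER_MEMORY_WORKFLOW_EXTRA_HEADERS` variable is parsed as a comma-separated list of `key=value` pairs, with whitespace trimmed and malformed pairs dropped. A quick illustration of the parsing logic above; the env value shown is hypothetical:

```ts
// Hypothetical value: MEMORY_USER_MEMORY_WORKFLOW_EXTRA_HEADERS="x-vercel-protection-bypass=secret, x-trace=on"
const raw = 'x-vercel-protection-bypass=secret, x-trace=on';

const headers = raw
  .split(',')
  .filter(Boolean)
  .reduce<Record<string, string>>((acc, pair) => {
    const [key, value] = pair.split('=').map((s) => s.trim());
    if (key && value) acc[key] = value; // pairs missing '=' or either side are skipped
    return acc;
  }, {});

// => { 'x-vercel-protection-bypass': 'secret', 'x-trace': 'on' }
```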

package/src/server/routers/async/caller.ts
CHANGED

@@ -38,7 +38,7 @@ export const createAsyncServerClient = async (userId: string) => {
 };
 
 /**
- *
+ * Helper method for inferring caller type, but does not actually call createAsyncCallerFactory. Calling it will throw an error: asyncRouter is not initialized
 */
 const helperFunc = () => {
   const dummyCreateCaller = createAsyncCallerFactory(asyncRouter);
@@ -52,8 +52,8 @@ interface CreateCallerOptions {
 }
 
 /**
- *
- *
+ * Factory method for creating caller, using HTTP Client to make calls
+ * Unified usage pattern: caller.a.b()
 */
 export const createAsyncCaller = async (
   options: CreateCallerOptions,

package/src/server/routers/async/image.ts
CHANGED

@@ -147,7 +147,7 @@ const categorizeError = (
     };
   }
 
-  // FIXME: 401
+  // FIXME: 401 errors should be handled in agentRuntime for better practice
  if (error.errorType === AgentRuntimeErrorType.InvalidProviderAPIKey || error?.status === 401) {
    return {
      errorMessage:

package/src/server/routers/async/ragEval.ts
CHANGED

@@ -63,7 +63,7 @@ export const ragEvalRouter = router({
       let questionEmbeddingId = evalRecord.questionEmbeddingId;
       let context = evalRecord.context;
 
-      //
+      // If questionEmbeddingId does not exist, perform an embedding
       if (!questionEmbeddingId) {
         const embeddings = await modelRuntime.embeddings({
           dimensions: 1024,
@@ -83,7 +83,7 @@ export const ragEvalRouter = router({
         questionEmbeddingId = embeddingId;
       }
 
-      //
+      // If context does not exist, perform a retrieval
       if (!context || context.length === 0) {
         const datasetRecord = await ctx.datasetRecordModel.findById(evalRecord.datasetRecordId);
 
@@ -99,7 +99,7 @@ export const ragEvalRouter = router({
         await ctx.evalRecordModel.update(evalRecord.id, { context });
       }
 
-      //
+      // Generate LLM answer
       const { messages } = chainAnswerWithContext({ context, knowledge: [], question });
 
       const response = await modelRuntime.chat({

package/src/server/routers/lambda/_helpers/resolveContext.ts
CHANGED

@@ -54,14 +54,14 @@ export const resolveContext = async (
 };
 
 /**
- *
+ * Reverse resolution: Get agentId from sessionId
  *
- *
+ * Used in scenarios like Topic Router where agentId is needed for queries
  *
  * @param sessionId - session ID
- * @param db -
- * @param userId -
- * @returns agentId
+ * @param db - Database instance
+ * @param userId - User ID
+ * @returns agentId or undefined
  */
 export const resolveAgentIdFromSession = async (
   sessionId: string,
@@ -78,14 +78,14 @@ export const resolveAgentIdFromSession = async (
 };
 
 /**
- *
+ * Batch reverse resolution: Get agentId mapping from multiple sessionIds
  *
- *
+ * Used in scenarios requiring batch sessionId -> agentId resolution (e.g., recentTopics)
  *
- * @param sessionIds - session
- * @param db -
- * @param userId -
- * @returns sessionId -> agentId
+ * @param sessionIds - Array of session IDs
+ * @param db - Database instance
+ * @param userId - User ID
+ * @returns Map of sessionId -> agentId
  */
 export const batchResolveAgentIdFromSessions = async (
   sessionIds: string[],

package/src/server/routers/lambda/_schema/context.ts
CHANGED

@@ -1,11 +1,11 @@
 import { z } from 'zod';
 
 /**
- *
- *
+ * Conversation context schema
+ * Supports both agentId and sessionId for backward compatibility
  *
- *
- *
+ * Priority: agentId > sessionId
+ * When both are provided, agentId will be used to resolve the corresponding sessionId
  */
 export const conversationContextSchema = z.object({
   agentId: z.string().optional(),
@@ -16,8 +16,8 @@ export const conversationContextSchema = z.object({
 });
 
 /**
- *
- *
+ * Simplified context
+ * Used for CRUD operations of messages and topics
  */
 export const basicContextSchema = z.object({
   agentId: z.string().optional(),

package/src/server/services/memory/userMemory/extract.ts
CHANGED

@@ -1841,30 +1841,30 @@ export class MemoryExtractionWorkflowService {
     return this.client;
   }
 
-  static triggerProcessUsers(payload: MemoryExtractionPayloadInput) {
+  static triggerProcessUsers(payload: MemoryExtractionPayloadInput, options?: { extraHeaders?: Record<string, string> }) {
     if (!payload.baseUrl) {
       throw new Error('Missing baseUrl for workflow trigger');
     }
 
     const url = getWorkflowUrl(WORKFLOW_PATHS.users, payload.baseUrl);
-    return this.getClient().trigger({ body: payload, url });
+    return this.getClient().trigger({ body: payload, headers: options?.extraHeaders, url });
   }
 
-  static triggerProcessUserTopics(payload: UserTopicWorkflowPayload) {
+  static triggerProcessUserTopics(payload: UserTopicWorkflowPayload, options?: { extraHeaders?: Record<string, string> }) {
     if (!payload.baseUrl) {
       throw new Error('Missing baseUrl for workflow trigger');
     }
 
     const url = getWorkflowUrl(WORKFLOW_PATHS.userTopics, payload.baseUrl);
-    return this.getClient().trigger({ body: payload, url });
+    return this.getClient().trigger({ body: payload, headers: options?.extraHeaders, url });
   }
 
-  static triggerProcessTopics(payload: MemoryExtractionPayloadInput) {
+  static triggerProcessTopics(payload: MemoryExtractionPayloadInput, options?: { extraHeaders?: Record<string, string> }) {
     if (!payload.baseUrl) {
       throw new Error('Missing baseUrl for workflow trigger');
     }
 
     const url = getWorkflowUrl(WORKFLOW_PATHS.topicBatch, payload.baseUrl);
-    return this.getClient().trigger({ body: payload, url });
+    return this.getClient().trigger({ body: payload, headers: options?.extraHeaders, url });
   }
 }
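
End to end, the webhook route (see its diff above) now threads the parsed headers into each workflow trigger, and the client's `trigger` call forwards them via its `headers` field. A condensed sketch of the new call shape, mirroring the route diff:

```ts
// Headers parsed from MEMORY_USER_MEMORY_WORKFLOW_EXTRA_HEADERS (may be undefined).
const { upstashWorkflowExtraHeaders } = parseMemoryExtractionConfig();

// The second options argument is new in next.248; omitting it keeps the old behavior.
const { workflowRunId } = await MemoryExtractionWorkflowService.triggerProcessUsers(
  buildWorkflowPayloadInput(params),
  { extraHeaders: upstashWorkflowExtraHeaders },
);
```
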
package/src/server/sitemap.ts
CHANGED

@@ -32,7 +32,7 @@ export enum SitemapType {
 
 export const LAST_MODIFIED = new Date().toISOString();
 
-//
+// Number of items per page
 const ITEMS_PER_PAGE = 100;
 
 export class Sitemap {
@@ -40,19 +40,19 @@ export class Sitemap {
 
   private discoverService = new DiscoverService();
 
-  //
+  // Get total number of plugin pages
   async getPluginPageCount(): Promise<number> {
     const list = await this.discoverService.getPluginIdentifiers();
     return Math.ceil(list.length / ITEMS_PER_PAGE);
   }
 
-  //
+  // Get total number of assistant pages
   async getAssistantPageCount(): Promise<number> {
     const list = await this.discoverService.getAssistantIdentifiers();
     return Math.ceil(list.length / ITEMS_PER_PAGE);
   }
 
-  //
+  // Get total number of model pages
   async getModelPageCount(): Promise<number> {
     const list = await this.discoverService.getModelIdentifiers();
     return Math.ceil(list.length / ITEMS_PER_PAGE);
@@ -164,14 +164,14 @@ export class Sitemap {
       ),
     );
 
-    //
+    // Get page counts for types that need pagination
     const [pluginPages, assistantPages, modelPages] = await Promise.all([
       this.getPluginPageCount(),
       this.getAssistantPageCount(),
       this.getModelPageCount(),
     ]);
 
-    //
+    // Generate paginated sitemap links
     const paginatedSitemaps = [
       ...Array.from({ length: pluginPages }, (_, i) =>
         this._generateSitemapLink(
@@ -211,7 +211,7 @@ export class Sitemap {
     const pageAssistants = list.slice(startIndex, endIndex);
 
     const sitmap = pageAssistants
-      .filter((item) => item.identifier) //
+      .filter((item) => item.identifier) // Filter out items with empty identifiers
       .map((item) =>
         this._genSitemap(urlJoin('/community/assistant', item.identifier), {
           lastModified: item?.lastModified || LAST_MODIFIED,
@@ -220,7 +220,7 @@ export class Sitemap {
       return flatten(sitmap);
     }
 
-    //
+    // If page number is not specified, return all (backward compatibility)
     const sitmap = list
       .filter((item) => item.identifier) // Filter out items with empty identifiers
       .map((item) =>
@@ -240,7 +240,7 @@ export class Sitemap {
     const pagePlugins = list.slice(startIndex, endIndex);
 
     const sitmap = pagePlugins
-      .filter((item) => item.identifier) //
+      .filter((item) => item.identifier) // Filter out items with empty identifiers
      .map((item) =>
         this._genSitemap(urlJoin('/community/plugin', item.identifier), {
           lastModified: item?.lastModified || LAST_MODIFIED,
@@ -249,7 +249,7 @@ export class Sitemap {
       return flatten(sitmap);
     }
 
-    //
+    // If page number is not specified, return all (backward compatibility)
     const sitmap = list
       .filter((item) => item.identifier) // Filter out items with empty identifiers
       .map((item) =>
@@ -269,7 +269,7 @@ export class Sitemap {
     const pageModels = list.slice(startIndex, endIndex);
 
     const sitmap = pageModels
-      .filter((item) => item.identifier) //
+      .filter((item) => item.identifier) // Filter out items with empty identifiers
       .map((item) =>
         this._genSitemap(urlJoin('/community/model', item.identifier), {
           lastModified: item?.lastModified || LAST_MODIFIED,
@@ -278,7 +278,7 @@ export class Sitemap {
       return flatten(sitmap);
     }
 
-    //
+    // If page number is not specified, return all (backward compatibility)
     const sitmap = list
      .filter((item) => item.identifier) // Filter out items with empty identifiers
      .map((item) =>