@lobehub/chat 1.116.3 → 1.117.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- package/.github/PULL_REQUEST_TEMPLATE.md +1 -0
- package/.github/workflows/release.yml +2 -0
- package/.i18nrc.js +1 -1
- package/CHANGELOG.md +117 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/components.json +12 -0
- package/locales/ar/models.json +3 -0
- package/locales/bg-BG/components.json +12 -0
- package/locales/bg-BG/models.json +3 -0
- package/locales/de-DE/components.json +12 -0
- package/locales/de-DE/models.json +3 -0
- package/locales/en-US/components.json +12 -0
- package/locales/en-US/models.json +3 -0
- package/locales/es-ES/components.json +12 -0
- package/locales/es-ES/models.json +3 -0
- package/locales/fa-IR/components.json +12 -0
- package/locales/fa-IR/models.json +3 -0
- package/locales/fr-FR/components.json +12 -0
- package/locales/fr-FR/models.json +3 -0
- package/locales/it-IT/components.json +12 -0
- package/locales/it-IT/models.json +3 -0
- package/locales/ja-JP/components.json +12 -0
- package/locales/ja-JP/models.json +3 -0
- package/locales/ko-KR/components.json +12 -0
- package/locales/ko-KR/models.json +3 -0
- package/locales/nl-NL/components.json +12 -0
- package/locales/nl-NL/models.json +3 -0
- package/locales/pl-PL/components.json +12 -0
- package/locales/pl-PL/models.json +3 -0
- package/locales/pt-BR/components.json +12 -0
- package/locales/pt-BR/models.json +3 -0
- package/locales/ru-RU/components.json +12 -0
- package/locales/ru-RU/models.json +3 -0
- package/locales/tr-TR/components.json +12 -0
- package/locales/tr-TR/models.json +3 -0
- package/locales/vi-VN/components.json +12 -0
- package/locales/vi-VN/models.json +3 -0
- package/locales/zh-CN/components.json +12 -0
- package/locales/zh-CN/models.json +3 -0
- package/locales/zh-TW/components.json +12 -0
- package/locales/zh-TW/models.json +3 -0
- package/package.json +5 -5
- package/packages/const/src/image.ts +9 -0
- package/packages/const/src/index.ts +2 -1
- package/packages/const/src/meta.ts +3 -2
- package/packages/const/src/settings/agent.ts +9 -4
- package/packages/const/src/settings/systemAgent.ts +0 -3
- package/packages/database/vitest.config.mts +1 -0
- package/packages/database/vitest.config.server.mts +1 -0
- package/packages/file-loaders/package.json +1 -1
- package/packages/file-loaders/vitest.config.mts +3 -7
- package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +11 -9
- package/packages/model-runtime/src/google/createImage.test.ts +657 -0
- package/packages/model-runtime/src/google/createImage.ts +152 -0
- package/packages/model-runtime/src/google/index.test.ts +0 -328
- package/packages/model-runtime/src/google/index.ts +3 -40
- package/packages/model-runtime/src/utils/modelParse.ts +2 -1
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +239 -0
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +22 -22
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +9 -116
- package/packages/model-runtime/src/utils/postProcessModelList.ts +55 -0
- package/packages/model-runtime/src/utils/streams/google-ai.test.ts +7 -7
- package/packages/model-runtime/src/utils/streams/google-ai.ts +15 -2
- package/packages/model-runtime/src/utils/streams/openai/openai.test.ts +41 -0
- package/packages/model-runtime/src/utils/streams/openai/openai.ts +38 -2
- package/packages/model-runtime/src/utils/streams/protocol.test.ts +32 -0
- package/packages/model-runtime/src/utils/streams/protocol.ts +7 -3
- package/packages/model-runtime/src/utils/usageConverter.test.ts +58 -0
- package/packages/model-runtime/src/utils/usageConverter.ts +5 -1
- package/packages/model-runtime/vitest.config.mts +3 -0
- package/packages/prompts/package.json +0 -1
- package/packages/prompts/src/chains/__tests__/abstractChunk.test.ts +52 -0
- package/packages/prompts/src/chains/__tests__/answerWithContext.test.ts +100 -0
- package/packages/prompts/src/chains/__tests__/rewriteQuery.test.ts +88 -0
- package/packages/prompts/src/chains/__tests__/summaryGenerationTitle.test.ts +107 -0
- package/packages/prompts/src/chains/abstractChunk.ts +0 -2
- package/packages/prompts/src/chains/rewriteQuery.ts +3 -1
- package/packages/prompts/src/index.test.ts +41 -0
- package/packages/prompts/src/prompts/systemRole/index.test.ts +136 -0
- package/packages/prompts/vitest.config.mts +3 -0
- package/packages/types/src/index.ts +2 -0
- package/packages/utils/package.json +5 -1
- package/packages/utils/src/client/index.ts +2 -0
- package/packages/utils/src/server/index.ts +5 -0
- package/packages/utils/vitest.config.mts +4 -0
- package/src/app/(backend)/middleware/auth/index.test.ts +2 -2
- package/src/app/(backend)/middleware/auth/index.ts +1 -1
- package/src/app/(backend)/oidc/consent/route.ts +1 -2
- package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +2 -2
- package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -1
- package/src/app/[variants]/(main)/files/[id]/page.tsx +1 -1
- package/src/app/[variants]/(main)/settings/sync/page.tsx +1 -1
- package/src/app/[variants]/(main)/settings/system-agent/index.tsx +2 -1
- package/src/components/HtmlPreview/HtmlPreviewAction.tsx +32 -0
- package/src/components/HtmlPreview/PreviewDrawer.tsx +133 -0
- package/src/components/HtmlPreview/index.ts +2 -0
- package/src/config/aiModels/google.ts +42 -22
- package/src/config/aiModels/openrouter.ts +33 -0
- package/src/config/aiModels/vertexai.ts +4 -4
- package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +6 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +38 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +13 -1
- package/src/features/Conversation/components/ChatItem/ShareMessageModal/ShareText/index.tsx +1 -1
- package/src/features/Conversation/components/ChatItem/index.tsx +23 -0
- package/src/features/ShareModal/ShareJSON/index.tsx +2 -2
- package/src/features/ShareModal/ShareText/index.tsx +1 -1
- package/src/libs/oidc-provider/adapter.ts +1 -1
- package/src/libs/trpc/edge/middleware/jwtPayload.test.ts +1 -1
- package/src/libs/trpc/edge/middleware/jwtPayload.ts +1 -2
- package/src/libs/trpc/lambda/middleware/keyVaults.ts +1 -2
- package/src/locales/default/chat.ts +1 -0
- package/src/locales/default/components.ts +12 -0
- package/src/middleware.ts +3 -3
- package/src/server/routers/tools/search.test.ts +1 -1
- package/src/services/config.ts +2 -4
- package/src/utils/client/switchLang.ts +1 -1
- package/{packages/utils/src → src/utils}/server/pageProps.ts +2 -1
- package/tsconfig.json +1 -1
- package/vitest.config.mts +1 -0
- package/packages/model-runtime/src/UniformRuntime/index.ts +0 -117
- package/{packages/const/src → src/const}/locale.ts +0 -0
- package/{packages/utils/src → src/utils}/locale.test.ts +0 -0
- package/{packages/utils/src → src/utils}/locale.ts +0 -0
- package/{packages/utils/src → src/utils}/server/routeVariants.ts +0 -0

package/packages/model-runtime/src/utils/streams/openai/openai.test.ts
@@ -2271,4 +2271,45 @@ describe('OpenAIStream', () => {
       );
     });
   });
+
+  it('should handle base64_image in delta.images (image_url shape)', async () => {
+    const base64 =
+      'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==';
+
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: {
+                images: [
+                  {
+                    type: 'image_url',
+                    image_url: { url: base64 },
+                    index: 0,
+                  },
+                ],
+              },
+              index: 0,
+            },
+          ],
+          id: '6',
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = OpenAIStream(mockOpenAIStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(['id: 6\n', 'event: base64_image\n', `data: "${base64}"\n\n`]);
+  });
 });
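
The expected output above is the stream protocol's SSE-style framing: an `id:` line, an `event:` line, and a JSON-encoded `data:` line terminated by a blank line. For reference, a minimal consumer sketch of that framing (not part of the diff; `parseProtocolEvent` is a hypothetical helper):

```ts
// Minimal sketch: pull the fields out of one protocol frame like the test expects.
const parseProtocolEvent = (frame: string) => {
  const field = (name: string) => frame.match(new RegExp(`^${name}: (.*)$`, 'm'))?.[1];
  const data = field('data');
  return {
    id: field('id'),
    event: field('event'),
    data: data ? JSON.parse(data) : undefined, // data is JSON-encoded, e.g. a quoted string
  };
};

// parseProtocolEvent('id: 6\nevent: base64_image\ndata: "data:image/png;base64,..."\n\n')
// -> { id: '6', event: 'base64_image', data: 'data:image/png;base64,...' }
```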

package/packages/model-runtime/src/utils/streams/openai/openai.ts
@@ -96,6 +96,36 @@ const transformOpenAIStream = (
     }
   }

+  // Handle image preview chunks (e.g. Gemini 2.5 flash image preview)
+  // Example shape:
+  // choices[0].delta.images = [{ type: 'image_url', image_url: { url: 'data:image/png;base64,...' }, index: 0 }]
+  if (
+    (item as any).delta &&
+    Array.isArray((item as any).delta.images) &&
+    (item as any).delta.images.length > 0
+  ) {
+    const images = (item as any).delta.images as any[];
+
+    return images
+      .map((img) => {
+        // support multiple possible shapes for the url
+        const url =
+          img?.image_url?.url ||
+          img?.image_url?.image_url?.url ||
+          img?.url ||
+          (typeof img === 'string' ? img : undefined);
+
+        if (!url) return null;
+
+        return {
+          data: url,
+          id: chunk.id,
+          type: 'base64_image',
+        } as StreamProtocolChunk;
+      })
+      .filter(Boolean) as StreamProtocolChunk[];
+  }
+
   // Handle the given finish reason
   if (item.finish_reason) {
     // one-api's streaming API may emit chunks that carry both finish_reason and content
@@ -192,11 +222,11 @@ const transformOpenAIStream = (
     if ('content' in item.delta && Array.isArray(item.delta.content)) {
       return item.delta.content
         .filter((block: any) => block.type === 'thinking' && Array.isArray(block.thinking))
-        .map((block: any) =>
+        .map((block: any) =>
           block.thinking
             .filter((thinkItem: any) => thinkItem.type === 'text' && thinkItem.text)
             .map((thinkItem: any) => thinkItem.text)
-            .join('')
+            .join(''),
         )
         .join('');
     }
@@ -233,6 +263,12 @@ const transformOpenAIStream = (
       streamContext.thinkingInContent = false;
     }

+    // If content is an empty string but the chunk carries usage, return the usage first (e.g. Gemini image-preview returns its usage in a separate final chunk)
+    if (content === '' && chunk.usage) {
+      const usage = chunk.usage;
+      return { data: convertUsage(usage, provider), id: chunk.id, type: 'usage' };
+    }
+
     // Check whether there is citations content and update the returnedCitation state
     if (!streamContext?.returnedCitation) {
       const citations =
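
The URL lookup in the image-handling hunk above tolerates several provider-specific payload shapes. Pulled out as a standalone sketch (hypothetical `extractImageUrl` name; same fallback order as the diff):

```ts
// Standalone sketch of the fallback chain used in the hunk above (hypothetical helper name).
const extractImageUrl = (img: any): string | undefined =>
  img?.image_url?.url ||            // { image_url: { url } }
  img?.image_url?.image_url?.url || // doubly nested variant
  img?.url ||                       // { url } directly
  (typeof img === 'string' ? img : undefined); // bare data-URL string

// extractImageUrl({ type: 'image_url', image_url: { url: 'data:image/png;base64,...' } })
// -> 'data:image/png;base64,...'
```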

package/packages/model-runtime/src/utils/streams/protocol.test.ts
@@ -200,4 +200,36 @@ describe('createTokenSpeedCalculator', async () => {
     const results = await processChunk(transformer, chunks);
     expect(results).toHaveLength(chunks.length);
   });
+
+  it('should calculate token speed considering outputImageTokens when totalOutputTokens is missing', async () => {
+    const chunks = [
+      { data: '', id: 'chatcmpl-image-1', type: 'text' },
+      { data: 'hi', id: 'chatcmpl-image-1', type: 'text' },
+      { data: 'stop', id: 'chatcmpl-image-1', type: 'stop' },
+      {
+        data: {
+          inputTextTokens: 9,
+          outputTextTokens: 1,
+          outputImageTokens: 4,
+          totalInputTokens: 9,
+          // totalOutputTokens intentionally omitted to force summation path
+          totalTokens: 13,
+        },
+        id: 'chatcmpl-image-1',
+        type: 'usage',
+      },
+    ];
+
+    const transformer = createTokenSpeedCalculator((v) => v, { inputStartAt });
+    const results = await processChunk(transformer, chunks);
+
+    // should push an extra speed chunk
+    expect(results).toHaveLength(chunks.length + 1);
+    const speedChunk = results.slice(-1)[0];
+    expect(speedChunk.id).toBe('output_speed');
+    expect(speedChunk.type).toBe('speed');
+    // tps and ttft should be numeric (avoid flakiness if interval is 0ms)
+    expect(speedChunk.data.tps).not.toBeNaN();
+    expect(speedChunk.data.ttft).not.toBeNaN();
+  });
 });

package/packages/model-runtime/src/utils/streams/protocol.ts
@@ -364,10 +364,14 @@ export const createTokenSpeedCalculator = (
     }
     // if the chunk is the stop chunk, set as output finish
     if (inputStartAt && outputStartAt && chunk.type === 'usage') {
-      const totalOutputTokens =
-
+      const totalOutputTokens =
+        chunk.data?.totalOutputTokens ??
+        (chunk.data?.outputTextTokens ?? 0) + (chunk.data?.outputImageTokens ?? 0);
+      const reasoningTokens = chunk.data?.outputReasoningTokens ?? 0;
       const outputTokens =
-        (outputThinking ?? false)
+        (outputThinking ?? false)
+          ? totalOutputTokens
+          : Math.max(0, totalOutputTokens - reasoningTokens);
       result.push({
         data: {
           tps: (outputTokens / (Date.now() - outputStartAt)) * 1000,
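
This fallback is exactly what the new protocol test exercises: with `totalOutputTokens` absent from the usage payload, text and image tokens are summed before reasoning tokens are subtracted. A worked sketch with the test's numbers:

```ts
// Worked example using the usage payload from the test above.
const data: {
  totalOutputTokens?: number;
  outputTextTokens?: number;
  outputImageTokens?: number;
  outputReasoningTokens?: number;
} = { outputTextTokens: 1, outputImageTokens: 4 };

const totalOutputTokens =
  data.totalOutputTokens ?? (data.outputTextTokens ?? 0) + (data.outputImageTokens ?? 0); // 1 + 4 = 5

const reasoningTokens = data.outputReasoningTokens ?? 0; // 0
const outputTokens = Math.max(0, totalOutputTokens - reasoningTokens); // 5 tokens drive the tps figure
```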

package/packages/model-runtime/src/utils/usageConverter.test.ts
@@ -290,4 +290,62 @@ describe('convertUsage', () => {
       totalTokens: 6550,
     });
   });
+
+  it('should handle output image tokens correctly', () => {
+    // Arrange
+    const usageWithImage = {
+      prompt_tokens: 100,
+      completion_tokens: 200,
+      completion_tokens_details: {
+        image_tokens: 60,
+        reasoning_tokens: 30,
+      },
+      total_tokens: 300,
+    } as OpenAI.Completions.CompletionUsage;
+
+    // Act
+    const result = convertUsage(usageWithImage);
+
+    // Assert
+    expect(result).toEqual({
+      inputTextTokens: 100,
+      totalInputTokens: 100,
+      totalOutputTokens: 200,
+      outputImageTokens: 60,
+      outputReasoningTokens: 30,
+      outputTextTokens: 110, // 200 - 60 - 30
+      totalTokens: 300,
+    });
+  });
+
+  it('should handle response output image tokens correctly for ResponseUsage', () => {
+    // Arrange
+    const responseUsage = {
+      input_tokens: 100,
+      input_tokens_details: {
+        cached_tokens: 0,
+      },
+      output_tokens: 200,
+      output_tokens_details: {
+        image_tokens: 60,
+        reasoning_tokens: 30,
+      },
+      total_tokens: 300,
+    } as OpenAI.Responses.ResponseUsage;
+
+    // Act
+    const result = convertResponseUsage(responseUsage);
+
+    // Assert
+    expect(result).toEqual({
+      inputTextTokens: 100,
+      inputCacheMissTokens: 100, // 100 - 0
+      totalInputTokens: 100,
+      totalOutputTokens: 200,
+      outputImageTokens: 60,
+      outputReasoningTokens: 30,
+      outputTextTokens: 170, // 200 - 30
+      totalTokens: 300,
+    });
+  });
 });

package/packages/model-runtime/src/utils/usageConverter.ts
@@ -20,12 +20,13 @@ export const convertUsage = (
   const totalOutputTokens = usage.completion_tokens;
   const outputReasoning = usage.completion_tokens_details?.reasoning_tokens || 0;
   const outputAudioTokens = usage.completion_tokens_details?.audio_tokens || 0;
+  const outputImageTokens = (usage.completion_tokens_details as any)?.image_tokens || 0;

   // XAI's completion_tokens does not include reasoning_tokens, so it needs special handling
   const outputTextTokens =
     provider === 'xai'
       ? totalOutputTokens - outputAudioTokens
-      : totalOutputTokens - outputReasoning - outputAudioTokens;
+      : totalOutputTokens - outputReasoning - outputAudioTokens - outputImageTokens;

   const totalTokens = inputCitationTokens + usage.total_tokens;

@@ -37,6 +38,7 @@
     inputCitationTokens: inputCitationTokens,
     inputTextTokens: inputTextTokens,
     outputAudioTokens: outputAudioTokens,
+    outputImageTokens: outputImageTokens,
     outputReasoningTokens: outputReasoning,
     outputTextTokens: outputTextTokens,
     rejectedPredictionTokens: usage.completion_tokens_details?.rejected_prediction_tokens,
@@ -75,6 +77,7 @@ export const convertResponseUsage = (usage: OpenAI.Responses.ResponseUsage): Mod

   // For ResponseUsage, outputTextTokens is totalOutputTokens minus reasoning, as no audio output tokens are specified.
   const outputTextTokens = totalOutputTokens - outputReasoningTokens;
+  const outputImageTokens = (usage.output_tokens_details as any)?.image_tokens || 0;

   // 3. Construct the comprehensive data object (matching ModelTokensUsage structure)
   const data = {
@@ -87,6 +90,7 @@ convertResponseUsage(usage: OpenAI.Responses.ResponseUsage): Mod
     inputCitationTokens: undefined, // Not in ResponseUsage
     inputTextTokens: inputTextTokens,
     outputAudioTokens: undefined, // Not in ResponseUsage
+    outputImageTokens: outputImageTokens,
     outputReasoningTokens: outputReasoningTokens,
     outputTextTokens: outputTextTokens,
     rejectedPredictionTokens: undefined, // Not in ResponseUsage
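
Note the asymmetry the two converters now have, which the usageConverter tests above pin down. A worked check with the tests' numbers:

```ts
// Chat Completions path (convertUsage): image tokens ARE subtracted from text tokens.
// completion_tokens = 200, reasoning = 30, audio = 0 (absent), image = 60
const chatOutputTextTokens = 200 - 30 - 0 - 60; // 110, as the first test asserts

// Responses API path (convertResponseUsage): outputTextTokens is computed before the
// image-token line, so image tokens are reported separately but NOT subtracted.
const responseOutputTextTokens = 200 - 30; // 170, as the second test asserts
```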

package/packages/prompts/src/chains/__tests__/abstractChunk.test.ts
@@ -0,0 +1,52 @@
+import { describe, expect, it, vi } from 'vitest';
+
+import { chainAbstractChunkText } from '../abstractChunk';
+
+describe('chainAbstractChunkText', () => {
+  it('should generate correct chat payload for chunk text', () => {
+    const testText = 'This is a sample chunk of text that needs to be summarized.';
+
+    const result = chainAbstractChunkText(testText);
+
+    expect(result).toEqual({
+      messages: [
+        {
+          content:
+            '你是一名擅长从 chunk 中提取摘要的助理,你需要将用户的会话总结为 1~2 句话的摘要,输出成 chunk 所使用的语种',
+          role: 'system',
+        },
+        {
+          content: `chunk: ${testText}`,
+          role: 'user',
+        },
+      ],
+    });
+  });
+
+  it('should handle empty text', () => {
+    const result = chainAbstractChunkText('');
+
+    expect(result.messages).toHaveLength(2);
+    expect(result.messages![1].content).toBe('chunk: ');
+  });
+
+  it('should handle text with special characters', () => {
+    const testText = 'Text with special chars: @#$%^&*()';
+
+    const result = chainAbstractChunkText(testText);
+
+    expect(result.messages![1].content).toBe(`chunk: ${testText}`);
+  });
+
+  it('should always use system role for first message', () => {
+    const result = chainAbstractChunkText('test');
+
+    expect(result.messages![0].role).toBe('system');
+  });
+
+  it('should always use user role for second message', () => {
+    const result = chainAbstractChunkText('test');
+
+    expect(result.messages![1].role).toBe('user');
+  });
+});

package/packages/prompts/src/chains/__tests__/answerWithContext.test.ts
@@ -0,0 +1,100 @@
+import { describe, expect, it } from 'vitest';
+
+import { chainAnswerWithContext } from '../answerWithContext';
+
+describe('chainAnswerWithContext', () => {
+  it('should generate correct chat payload with context and knowledge', () => {
+    const testParams = {
+      context: ['Context passage 1', 'Context passage 2'],
+      knowledge: ['AI', 'Machine Learning'],
+      question: 'What is artificial intelligence?',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+
+    expect(result.messages).toHaveLength(1);
+    expect(result.messages![0].role).toBe('user');
+    expect(result.messages![0].content).toContain('AI/Machine Learning');
+    expect(result.messages![0].content).toContain('Context passage 1');
+    expect(result.messages![0].content).toContain('Context passage 2');
+    expect(result.messages![0].content).toContain('What is artificial intelligence?');
+  });
+
+  it('should handle single knowledge area', () => {
+    const testParams = {
+      context: ['Single context'],
+      knowledge: ['Technology'],
+      question: 'How does it work?',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+
+    expect(result.messages![0].content).toContain('Technology');
+  });
+
+  it('should handle multiple knowledge areas', () => {
+    const testParams = {
+      context: ['Context'],
+      knowledge: ['AI', 'ML', 'NLP', 'Computer Vision'],
+      question: 'Tell me about these fields',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+
+    expect(result.messages![0].content).toContain('AI/ML/NLP/Computer Vision');
+  });
+
+  it('should handle empty context array', () => {
+    const testParams = {
+      context: [],
+      knowledge: ['AI'],
+      question: 'What is AI?',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+
+    expect(result.messages![0].content).toContain('<Context>');
+    expect(result.messages![0].content).toContain('</Context>');
+  });
+
+  it('should include proper context formatting', () => {
+    const testParams = {
+      context: ['First passage', 'Second passage'],
+      knowledge: ['Test'],
+      question: 'Test question',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+
+    expect(result.messages![0].content).toContain(
+      '<Context>\nFirst passage\nSecond passage\n</Context>',
+    );
+  });
+
+  it('should include proper instructions about using passages', () => {
+    const testParams = {
+      context: ['Context'],
+      knowledge: ['Knowledge'],
+      question: 'Question',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+    const content = result.messages![0].content;
+
+    expect(content).toContain('passages might not be relevant');
+    expect(content).toContain('please only use the passages that are relevant');
+    expect(content).toContain('answer using your knowledge');
+  });
+
+  it('should include markdown formatting instruction', () => {
+    const testParams = {
+      context: ['Context'],
+      knowledge: ['Knowledge'],
+      question: 'Question',
+    };
+
+    const result = chainAnswerWithContext(testParams);
+
+    expect(result.messages![0].content).toContain('follow markdown syntax');
+  });
+});

package/packages/prompts/src/chains/__tests__/rewriteQuery.test.ts
@@ -0,0 +1,88 @@
+import { describe, expect, it, vi } from 'vitest';
+
+// Mock DEFAULT_REWRITE_QUERY
+
+import { DEFAULT_REWRITE_QUERY, chainRewriteQuery } from '../rewriteQuery';
+
+describe('chainRewriteQuery', () => {
+  it('should generate correct chat payload with default instruction', () => {
+    const query = 'What about the weather?';
+    const context = ['Previous message 1', 'Previous message 2'];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages).toHaveLength(2);
+    expect(result.messages![0].role).toBe('system');
+    expect(result.messages![1].role).toBe('user');
+    expect(result.messages![0].content).toContain(DEFAULT_REWRITE_QUERY);
+    expect(result.messages![1].content).toContain(query);
+  });
+
+  it('should include chat history in system message', () => {
+    const query = 'Follow up question';
+    const context = ['User: Hello', 'Assistant: Hi there'];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages![0].content).toContain('<chatHistory>');
+    expect(result.messages![0].content).toContain('User: Hello');
+    expect(result.messages![0].content).toContain('Assistant: Hi there');
+    expect(result.messages![0].content).toContain('</chatHistory>');
+  });
+
+  it('should use custom instruction when provided', () => {
+    const query = 'Test query';
+    const context = ['Context'];
+    const customInstruction = 'Custom rewrite instruction';
+
+    const result = chainRewriteQuery(query, context, customInstruction);
+
+    expect(result.messages![0].content).toContain(customInstruction);
+    expect(result.messages![0].content).not.toContain(DEFAULT_REWRITE_QUERY);
+  });
+
+  it('should format user message correctly', () => {
+    const query = 'What is the status?';
+    const context = ['Previous context'];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages![1].content).toBe(`Follow Up Input: ${query}, it's standalone query:`);
+  });
+
+  it('should handle empty context array', () => {
+    const query = 'Empty context query';
+    const context: string[] = [];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages![0].content).toContain('<chatHistory>\n\n</chatHistory>');
+  });
+
+  it('should handle single context item', () => {
+    const query = 'Single context query';
+    const context = ['Only one message'];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages![0].content).toContain('Only one message');
+  });
+
+  it('should join multiple context items with newlines', () => {
+    const query = 'Multi context query';
+    const context = ['Message 1', 'Message 2', 'Message 3'];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages![0].content).toContain('Message 1\nMessage 2\nMessage 3');
+  });
+
+  it('should handle special characters in query', () => {
+    const query = 'Query with special chars: @#$%^&*()';
+    const context = ['Context'];
+
+    const result = chainRewriteQuery(query, context);
+
+    expect(result.messages![1].content).toContain(query);
+  });
+});

package/packages/prompts/src/chains/__tests__/summaryGenerationTitle.test.ts
@@ -0,0 +1,107 @@
+import { describe, expect, it } from 'vitest';
+
+import { chainSummaryGenerationTitle } from '../summaryGenerationTitle';
+
+describe('chainSummaryGenerationTitle', () => {
+  it('should generate correct chat payload for image modal', () => {
+    const prompts = ['A beautiful sunset', 'Mountain landscape'];
+    const modal = 'image' as const;
+    const locale = 'zh-CN';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    expect(result.messages).toHaveLength(2);
+    expect(result.messages![0].role).toBe('system');
+    expect(result.messages![1].role).toBe('user');
+    expect(result.messages![0].content).toContain('AI image prompt');
+    expect(result.messages![0].content).toContain(locale);
+  });
+
+  it('should generate correct chat payload for video modal', () => {
+    const prompts = ['Dancing in the rain'];
+    const modal = 'video' as const;
+    const locale = 'en-US';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    expect(result.messages![0].content).toContain('AI video prompt');
+    expect(result.messages![0].content).toContain(locale);
+  });
+
+  it('should format single prompt correctly', () => {
+    const prompts = ['Single prompt'];
+    const modal = 'image' as const;
+    const locale = 'zh-CN';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    expect(result.messages![1].content).toContain('1. Single prompt');
+  });
+
+  it('should format multiple prompts with numbering', () => {
+    const prompts = ['First prompt', 'Second prompt', 'Third prompt'];
+    const modal = 'image' as const;
+    const locale = 'zh-CN';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    const userMessage = result.messages![1].content;
+    expect(userMessage).toContain('1. First prompt');
+    expect(userMessage).toContain('2. Second prompt');
+    expect(userMessage).toContain('3. Third prompt');
+  });
+
+  it('should include system instructions about title requirements', () => {
+    const prompts = ['Test prompt'];
+    const modal = 'image' as const;
+    const locale = 'zh-CN';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    const systemMessage = result.messages![0].content;
+    expect(systemMessage).toContain('资深的 AI 艺术创作者');
+    expect(systemMessage).toContain('10个字以内');
+    expect(systemMessage).toContain('不需要包含标点符号');
+  });
+
+  it('should handle empty prompts array', () => {
+    const prompts: string[] = [];
+    const modal = 'image' as const;
+    const locale = 'zh-CN';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    expect(result.messages![1].content).toContain('提示词:\n');
+  });
+
+  it('should handle different locales', () => {
+    const prompts = ['Test'];
+    const modal = 'image' as const;
+    const customLocale = 'ja-JP';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, customLocale);
+
+    expect(result.messages![0].content).toContain(customLocale);
+  });
+
+  it('should differentiate between image and video modals in instructions', () => {
+    const prompts = ['Test prompt'];
+    const locale = 'zh-CN';
+
+    const imageResult = chainSummaryGenerationTitle(prompts, 'image', locale);
+    const videoResult = chainSummaryGenerationTitle(prompts, 'video', locale);
+
+    expect(imageResult.messages![0].content).toContain('AI image prompt');
+    expect(videoResult.messages![0].content).toContain('AI video prompt');
+  });
+
+  it('should format prompts with newlines between them', () => {
+    const prompts = ['Prompt one', 'Prompt two'];
+    const modal = 'image' as const;
+    const locale = 'zh-CN';
+
+    const result = chainSummaryGenerationTitle(prompts, modal, locale);
+
+    expect(result.messages![1].content).toContain('1. Prompt one\n2. Prompt two');
+  });
+});

package/packages/prompts/src/chains/abstractChunk.ts
@@ -1,4 +1,3 @@
-import { DEFAULT_MODEL } from '@lobechat/const';
 import { ChatStreamPayload } from '@lobechat/types';

 export const chainAbstractChunkText = (text: string): Partial<ChatStreamPayload> => {
@@ -14,6 +13,5 @@ export const chainAbstractChunkText = (text: string): Partial<ChatStreamPayload>
         role: 'user',
       },
     ],
-    model: DEFAULT_MODEL,
   };
 };
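
With `model: DEFAULT_MODEL` dropped, `chainAbstractChunkText` now returns only `messages`, which is exactly what the new abstractChunk test pins down with a strict `toEqual`. A sketch of the resulting shape (illustrative values; the system prompt string is truncated here):

```ts
// Shape returned by chainAbstractChunkText after this change, per the test's toEqual.
type ChunkAbstractPayload = {
  messages: { content: string; role: 'system' | 'user' }[];
};

const example: ChunkAbstractPayload = {
  messages: [
    { content: '你是一名擅长从 chunk 中提取摘要的助理…', role: 'system' }, // truncated for brevity
    { content: 'chunk: some chunk text', role: 'user' },
  ],
};
```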

package/packages/prompts/src/chains/rewriteQuery.ts
@@ -1,6 +1,8 @@
-import { DEFAULT_REWRITE_QUERY } from '@lobechat/const';
 import { ChatStreamPayload } from '@lobechat/types';

+export const DEFAULT_REWRITE_QUERY =
+  'Given the following conversation and a follow-up question, rephrase the follow up question to be a standalone question, in its original language. Keep as much details as possible from previous messages. Keep entity names and all.';
+
 export const chainRewriteQuery = (
   query: string,
   context: string[],
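
`DEFAULT_REWRITE_QUERY` now lives next to the chain itself rather than in `@lobechat/const`. Going by the assertions in the new rewriteQuery tests, a rough sketch of the payload the chain builds (the template is inferred from those tests, not quoted from the diff; exact spacing may differ):

```ts
// Sketch inferred from rewriteQuery.test.ts: system message carries the instruction
// plus the joined chat history; the user message format is asserted verbatim.
const buildRewritePayload = (query: string, context: string[], instruction: string) => ({
  messages: [
    {
      content: `${instruction}\n<chatHistory>\n${context.join('\n')}\n</chatHistory>`,
      role: 'system' as const,
    },
    {
      content: `Follow Up Input: ${query}, it's standalone query:`,
      role: 'user' as const,
    },
  ],
});
```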