@lobehub/chat 1.128.0 → 1.128.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/.github/workflows/test.yml +8 -1
  2. package/CHANGELOG.md +25 -0
  3. package/changelog/v1.json +9 -0
  4. package/next.config.ts +8 -1
  5. package/package.json +71 -69
  6. package/packages/context-engine/ARCHITECTURE.md +425 -0
  7. package/packages/context-engine/package.json +40 -0
  8. package/packages/context-engine/src/base/BaseProcessor.ts +87 -0
  9. package/packages/context-engine/src/base/BaseProvider.ts +22 -0
  10. package/packages/context-engine/src/index.ts +32 -0
  11. package/packages/context-engine/src/pipeline.ts +219 -0
  12. package/packages/context-engine/src/processors/HistoryTruncate.ts +76 -0
  13. package/packages/context-engine/src/processors/InputTemplate.ts +83 -0
  14. package/packages/context-engine/src/processors/MessageCleanup.ts +87 -0
  15. package/packages/context-engine/src/processors/MessageContent.ts +298 -0
  16. package/packages/context-engine/src/processors/PlaceholderVariables.ts +196 -0
  17. package/packages/context-engine/src/processors/ToolCall.ts +186 -0
  18. package/packages/context-engine/src/processors/ToolMessageReorder.ts +113 -0
  19. package/packages/context-engine/src/processors/__tests__/HistoryTruncate.test.ts +175 -0
  20. package/packages/context-engine/src/processors/__tests__/InputTemplate.test.ts +243 -0
  21. package/packages/context-engine/src/processors/__tests__/MessageContent.test.ts +394 -0
  22. package/packages/context-engine/src/processors/__tests__/PlaceholderVariables.test.ts +334 -0
  23. package/packages/context-engine/src/processors/__tests__/ToolMessageReorder.test.ts +186 -0
  24. package/packages/context-engine/src/processors/index.ts +15 -0
  25. package/packages/context-engine/src/providers/HistorySummary.ts +102 -0
  26. package/packages/context-engine/src/providers/InboxGuide.ts +102 -0
  27. package/packages/context-engine/src/providers/SystemRoleInjector.ts +64 -0
  28. package/packages/context-engine/src/providers/ToolSystemRole.ts +118 -0
  29. package/packages/context-engine/src/providers/__tests__/HistorySummaryProvider.test.ts +112 -0
  30. package/packages/context-engine/src/providers/__tests__/InboxGuideProvider.test.ts +121 -0
  31. package/packages/context-engine/src/providers/__tests__/SystemRoleInjector.test.ts +200 -0
  32. package/packages/context-engine/src/providers/__tests__/ToolSystemRoleProvider.test.ts +140 -0
  33. package/packages/context-engine/src/providers/index.ts +11 -0
  34. package/packages/context-engine/src/types.ts +201 -0
  35. package/packages/context-engine/vitest.config.mts +10 -0
  36. package/packages/database/package.json +1 -1
  37. package/packages/prompts/src/prompts/systemRole/index.ts +1 -1
  38. package/packages/utils/src/index.ts +2 -0
  39. package/packages/utils/src/uriParser.test.ts +29 -0
  40. package/packages/utils/src/uriParser.ts +24 -0
  41. package/src/services/{__tests__ → chat}/chat.test.ts +22 -1032
  42. package/src/services/chat/clientModelRuntime.test.ts +385 -0
  43. package/src/services/chat/clientModelRuntime.ts +34 -0
  44. package/src/services/chat/contextEngineering.test.ts +848 -0
  45. package/src/services/chat/contextEngineering.ts +123 -0
  46. package/src/services/chat/helper.ts +61 -0
  47. package/src/services/{chat.ts → chat/index.ts} +24 -366
  48. package/src/services/chat/types.ts +9 -0
  49. package/src/services/models.ts +1 -1
  50. package/src/store/aiInfra/slices/aiModel/selectors.ts +2 -2
  51. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +1 -40
  52. package/src/services/{__tests__ → chat}/__snapshots__/chat.test.ts.snap +0 -0
@@ -0,0 +1,848 @@
1
+ import { ChatMessage } from '@lobechat/types';
2
+ import { afterEach, describe, expect, it, vi } from 'vitest';
3
+
4
+ import { contextEngineering } from './contextEngineering';
5
+ import * as helpers from './helper';
6
+
7
+ // Mock VARIABLE_GENERATORS
8
+ vi.mock('@/utils/client/parserPlaceholder', () => ({
9
+ VARIABLE_GENERATORS: {
10
+ date: () => '2023-12-25',
11
+ time: () => '14:30:45',
12
+ username: () => 'TestUser',
13
+ random: () => '12345',
14
+ },
15
+ }));
16
+
17
+ // By default, isServerMode is set to false
18
+ let isServerMode = false;
19
+
20
+ vi.mock('@lobechat/const', async (importOriginal) => {
21
+ const actual = await importOriginal();
22
+ return {
23
+ ...(actual as any),
24
+ get isServerMode() {
25
+ return isServerMode;
26
+ },
27
+ isDeprecatedEdition: false,
28
+ isDesktop: false,
29
+ };
30
+ });
31
+
32
+ afterEach(() => {
33
+ vi.resetModules();
34
+ vi.clearAllMocks();
35
+ });
36
+
37
+ describe('contextEngineering', () => {
38
+ describe('handle with files content in server mode', () => {
39
+ it('should includes files', async () => {
40
+ isServerMode = true;
41
+ // Mock isCanUseVision to return true for vision models
42
+ vi.spyOn(helpers, 'isCanUseVision').mockReturnValue(true);
43
+
44
+ const messages = [
45
+ {
46
+ content: 'Hello',
47
+ role: 'user',
48
+ imageList: [
49
+ {
50
+ id: 'imagecx1',
51
+ url: 'http://example.com/xxx0asd-dsd.png',
52
+ alt: 'ttt.png',
53
+ },
54
+ ],
55
+ fileList: [
56
+ {
57
+ fileType: 'plain/txt',
58
+ size: 100000,
59
+ id: 'file1',
60
+ url: 'http://abc.com/abc.txt',
61
+ name: 'abc.png',
62
+ },
63
+ {
64
+ id: 'file_oKMve9qySLMI',
65
+ name: '2402.16667v1.pdf',
66
+ type: 'application/pdf',
67
+ size: 11256078,
68
+ url: 'https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf',
69
+ },
70
+ ],
71
+ }, // Message with files
72
+ { content: 'Hey', role: 'assistant' }, // Regular user message
73
+ ] as ChatMessage[];
74
+
75
+ const output = await contextEngineering({
76
+ messages,
77
+ model: 'gpt-4o',
78
+ provider: 'openai',
79
+ });
80
+
81
+ expect(output).toEqual([
82
+ {
83
+ content: [
84
+ {
85
+ text: `Hello
86
+
87
+ <!-- SYSTEM CONTEXT (NOT PART OF USER QUERY) -->
88
+ <context.instruction>following part contains context information injected by the system. Please follow these instructions:
89
+
90
+ 1. Always prioritize handling user-visible content.
91
+ 2. the context is only required when user's queries rely on it.
92
+ </context.instruction>
93
+ <files_info>
94
+ <images>
95
+ <images_docstring>here are user upload images you can refer to</images_docstring>
96
+ <image name="ttt.png" url="http://example.com/xxx0asd-dsd.png"></image>
97
+ </images>
98
+ <files>
99
+ <files_docstring>here are user upload files you can refer to</files_docstring>
100
+ <file id="file1" name="abc.png" type="plain/txt" size="100000" url="http://abc.com/abc.txt"></file>
101
+ <file id="file_oKMve9qySLMI" name="2402.16667v1.pdf" type="undefined" size="11256078" url="https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf"></file>
102
+ </files>
103
+ </files_info>
104
+ <!-- END SYSTEM CONTEXT -->`,
105
+ type: 'text',
106
+ },
107
+ {
108
+ image_url: { detail: 'auto', url: 'http://example.com/xxx0asd-dsd.png' },
109
+ type: 'image_url',
110
+ },
111
+ ],
112
+ role: 'user',
113
+ },
114
+ {
115
+ content: 'Hey',
116
+ role: 'assistant',
117
+ },
118
+ ]);
119
+
120
+ isServerMode = false;
121
+ });
122
+
123
+ it('should include image files in server mode', async () => {
124
+ isServerMode = true;
125
+
126
+ vi.spyOn(helpers, 'isCanUseVision').mockReturnValue(false);
127
+
128
+ const messages = [
129
+ {
130
+ content: 'Hello',
131
+ role: 'user',
132
+ imageList: [
133
+ {
134
+ id: 'file1',
135
+ url: 'http://example.com/image.jpg',
136
+ alt: 'abc.png',
137
+ },
138
+ ],
139
+ }, // Message with files
140
+ { content: 'Hey', role: 'assistant' }, // Regular user message
141
+ ] as ChatMessage[];
142
+ const output = await contextEngineering({
143
+ messages,
144
+ provider: 'openai',
145
+ model: 'gpt-4-vision-preview',
146
+ });
147
+
148
+ expect(output).toEqual([
149
+ {
150
+ content: [
151
+ {
152
+ text: `Hello
153
+
154
+ <!-- SYSTEM CONTEXT (NOT PART OF USER QUERY) -->
155
+ <context.instruction>following part contains context information injected by the system. Please follow these instructions:
156
+
157
+ 1. Always prioritize handling user-visible content.
158
+ 2. the context is only required when user's queries rely on it.
159
+ </context.instruction>
160
+ <files_info>
161
+ <images>
162
+ <images_docstring>here are user upload images you can refer to</images_docstring>
163
+ <image name="abc.png" url="http://example.com/image.jpg"></image>
164
+ </images>
165
+
166
+ </files_info>
167
+ <!-- END SYSTEM CONTEXT -->`,
168
+ type: 'text',
169
+ },
170
+ ],
171
+ role: 'user',
172
+ },
173
+ {
174
+ content: 'Hey',
175
+ role: 'assistant',
176
+ },
177
+ ]);
178
+
179
+ isServerMode = false;
180
+ });
181
+ });
182
+
183
+ it('should handle empty tool calls messages correctly', async () => {
184
+ const messages = [
185
+ {
186
+ content: '## Tools\n\nYou can use these tools',
187
+ role: 'system',
188
+ },
189
+ {
190
+ content: '',
191
+ role: 'assistant',
192
+ tool_calls: [],
193
+ },
194
+ ] as ChatMessage[];
195
+
196
+ const result = await contextEngineering({
197
+ messages,
198
+ model: 'gpt-4',
199
+ provider: 'openai',
200
+ });
201
+
202
+ expect(result).toEqual([
203
+ {
204
+ content: '## Tools\n\nYou can use these tools',
205
+ role: 'system',
206
+ },
207
+ {
208
+ content: '',
209
+ role: 'assistant',
210
+ },
211
+ ]);
212
+ });
213
+
214
+ it('should handle assistant messages with reasoning correctly', async () => {
215
+ const messages = [
216
+ {
217
+ role: 'assistant',
218
+ content: 'The answer is 42.',
219
+ reasoning: {
220
+ content: 'I need to calculate the answer to life, universe, and everything.',
221
+ signature: 'thinking_process',
222
+ },
223
+ },
224
+ ] as ChatMessage[];
225
+
226
+ const result = await contextEngineering({
227
+ messages,
228
+ model: 'gpt-4',
229
+ provider: 'openai',
230
+ });
231
+
232
+ expect(result).toEqual([
233
+ {
234
+ content: [
235
+ {
236
+ signature: 'thinking_process',
237
+ thinking: 'I need to calculate the answer to life, universe, and everything.',
238
+ type: 'thinking',
239
+ },
240
+ {
241
+ text: 'The answer is 42.',
242
+ type: 'text',
243
+ },
244
+ ],
245
+ role: 'assistant',
246
+ },
247
+ ]);
248
+ });
249
+
250
+ it('should inject INBOX_GUIDE_SYSTEM_ROLE for welcome questions in inbox session', async () => {
251
+ // Don't mock INBOX_GUIDE_SYSTEMROLE, use the real one
252
+ const messages: ChatMessage[] = [
253
+ {
254
+ role: 'user',
255
+ content: 'Hello, this is my first question',
256
+ createdAt: Date.now(),
257
+ id: 'test-welcome',
258
+ meta: {},
259
+ updatedAt: Date.now(),
260
+ },
261
+ ];
262
+
263
+ const result = await contextEngineering(
264
+ {
265
+ messages,
266
+ model: 'gpt-4',
267
+ provider: 'openai',
268
+ },
269
+ {
270
+ isWelcomeQuestion: true,
271
+ trace: { sessionId: 'inbox' },
272
+ },
273
+ );
274
+
275
+ // Should have system message with inbox guide content
276
+ const systemMessage = result.find((msg) => msg.role === 'system');
277
+ expect(systemMessage).toBeDefined();
278
+ // Check for characteristic content of the actual INBOX_GUIDE_SYSTEMROLE
279
+ expect(systemMessage!.content).toContain('LobeChat Support Assistant');
280
+ expect(systemMessage!.content).toContain('LobeHub');
281
+ expect(Object.keys(systemMessage!).length).toEqual(2);
282
+ });
283
+
284
+ it('should inject historySummary into system message when provided', async () => {
285
+ const historySummary = 'Previous conversation summary: User discussed AI topics.';
286
+
287
+ const messages: ChatMessage[] = [
288
+ {
289
+ role: 'user',
290
+ content: 'Continue our discussion',
291
+ createdAt: Date.now(),
292
+ id: 'test-history',
293
+ meta: {},
294
+ updatedAt: Date.now(),
295
+ },
296
+ ];
297
+
298
+ const result = await contextEngineering(
299
+ {
300
+ messages,
301
+ model: 'gpt-4',
302
+ provider: 'openai',
303
+ },
304
+ {
305
+ historySummary,
306
+ },
307
+ );
308
+
309
+ // Should have system message with history summary
310
+ const systemMessage = result.find((msg) => msg.role === 'system');
311
+ expect(systemMessage).toBeDefined();
312
+ expect(systemMessage!.content).toContain(historySummary);
313
+ expect(Object.keys(systemMessage!).length).toEqual(2);
314
+ });
315
+ describe('getAssistantContent', () => {
316
+ it('should handle assistant message with imageList and content', async () => {
317
+ // Mock isCanUseVision to return true for vision models
318
+ vi.spyOn(helpers, 'isCanUseVision').mockReturnValue(true);
319
+
320
+ const messages: ChatMessage[] = [
321
+ {
322
+ role: 'assistant',
323
+ content: 'Here is an image.',
324
+ imageList: [{ id: 'img1', url: 'http://example.com/image.png', alt: 'test.png' }],
325
+ createdAt: Date.now(),
326
+ id: 'test-id',
327
+ meta: {},
328
+ updatedAt: Date.now(),
329
+ },
330
+ ];
331
+ const result = await contextEngineering({
332
+ messages,
333
+ model: 'gpt-4-vision-preview',
334
+ provider: 'openai',
335
+ });
336
+
337
+ expect(result[0].content).toEqual([
338
+ { text: 'Here is an image.', type: 'text' },
339
+ { image_url: { detail: 'auto', url: 'http://example.com/image.png' }, type: 'image_url' },
340
+ ]);
341
+ });
342
+
343
+ it('should handle assistant message with imageList but no content', async () => {
344
+ // Mock isCanUseVision to return true for vision models
345
+ vi.spyOn(helpers, 'isCanUseVision').mockReturnValue(true);
346
+
347
+ const messages: ChatMessage[] = [
348
+ {
349
+ role: 'assistant',
350
+ content: '',
351
+ imageList: [{ id: 'img1', url: 'http://example.com/image.png', alt: 'test.png' }],
352
+ createdAt: Date.now(),
353
+ id: 'test-id-2',
354
+ meta: {},
355
+ updatedAt: Date.now(),
356
+ },
357
+ ];
358
+ const result = await contextEngineering({
359
+ messages,
360
+ model: 'gpt-4-vision-preview',
361
+ provider: 'openai',
362
+ });
363
+
364
+ expect(result[0].content).toEqual([
365
+ { image_url: { detail: 'auto', url: 'http://example.com/image.png' }, type: 'image_url' },
366
+ ]);
367
+ });
368
+ });
369
+
370
+ it('should not include tool_calls for assistant message if model does not support tools', async () => {
371
+ // Mock isCanUseFC to return false
372
+ vi.spyOn(helpers, 'isCanUseFC').mockReturnValue(false);
373
+
374
+ const messages: ChatMessage[] = [
375
+ {
376
+ role: 'assistant',
377
+ content: 'I have a tool call.',
378
+ tools: [
379
+ {
380
+ id: 'tool_123',
381
+ type: 'default',
382
+ apiName: 'testApi',
383
+ arguments: '{}',
384
+ identifier: 'test-plugin',
385
+ },
386
+ ],
387
+ createdAt: Date.now(),
388
+ id: 'test-id-3',
389
+ meta: {},
390
+ updatedAt: Date.now(),
391
+ },
392
+ ];
393
+
394
+ const result = await contextEngineering({
395
+ messages,
396
+ model: 'some-model-without-fc',
397
+ provider: 'openai',
398
+ });
399
+
400
+ expect(result[0].tool_calls).toBeUndefined();
401
+ expect(result[0].content).toBe('I have a tool call.');
402
+ });
403
+
404
+ describe('Process placeholder variables', () => {
405
+ it('should process placeholder variables in string content', async () => {
406
+ const messages: ChatMessage[] = [
407
+ {
408
+ role: 'user',
409
+ content: 'Hello {{username}}, today is {{date}} and the time is {{time}}',
410
+ createdAt: Date.now(),
411
+ id: 'test-placeholder-1',
412
+ meta: {},
413
+ updatedAt: Date.now(),
414
+ },
415
+ {
416
+ role: 'assistant',
417
+ content: 'Hi there! Your random number is {{random}}',
418
+ createdAt: Date.now(),
419
+ id: 'test-placeholder-2',
420
+ meta: {},
421
+ updatedAt: Date.now(),
422
+ },
423
+ ];
424
+
425
+ const result = await contextEngineering({
426
+ messages,
427
+ model: 'gpt-4',
428
+ provider: 'openai',
429
+ });
430
+
431
+ expect(result[0].content).toBe(
432
+ 'Hello TestUser, today is 2023-12-25 and the time is 14:30:45',
433
+ );
434
+ expect(result[1].content).toBe('Hi there! Your random number is 12345');
435
+ });
436
+
437
+ it('should process placeholder variables in array content', async () => {
438
+ const messages = [
439
+ {
440
+ role: 'user',
441
+ content: [
442
+ {
443
+ type: 'text',
444
+ text: 'Hello {{username}}, today is {{date}}',
445
+ },
446
+ {
447
+ type: 'image_url',
448
+ image_url: { url: 'data:image/png;base64,abc123' },
449
+ },
450
+ ],
451
+ createdAt: Date.now(),
452
+ id: 'test-placeholder-array',
453
+ meta: {},
454
+ updatedAt: Date.now(),
455
+ },
456
+ ] as any;
457
+
458
+ const result = await contextEngineering({
459
+ messages,
460
+ model: 'gpt-4',
461
+ provider: 'openai',
462
+ });
463
+
464
+ expect(Array.isArray(result[0].content)).toBe(true);
465
+ const content = result[0].content as any[];
466
+ expect(content[0].text).toBe('Hello TestUser, today is 2023-12-25');
467
+ expect(content[1].image_url.url).toBe('data:image/png;base64,abc123');
468
+ });
469
+
470
+ it('should handle missing placeholder variables gracefully', async () => {
471
+ const messages: ChatMessage[] = [
472
+ {
473
+ role: 'user',
474
+ content: 'Hello {{username}}, missing: {{missing_var}}',
475
+ createdAt: Date.now(),
476
+ id: 'test-placeholder-missing',
477
+ meta: {},
478
+ updatedAt: Date.now(),
479
+ },
480
+ ];
481
+
482
+ const result = await contextEngineering({
483
+ messages,
484
+ model: 'gpt-4',
485
+ provider: 'openai',
486
+ });
487
+
488
+ expect(result[0].content).toBe('Hello TestUser, missing: {{missing_var}}');
489
+ });
490
+
491
+ it('should not modify messages without placeholder variables', async () => {
492
+ const messages: ChatMessage[] = [
493
+ {
494
+ role: 'user',
495
+ content: 'Hello there, no variables here',
496
+ createdAt: Date.now(),
497
+ id: 'test-no-placeholders',
498
+ meta: {},
499
+ updatedAt: Date.now(),
500
+ },
501
+ ];
502
+
503
+ const result = await contextEngineering({
504
+ messages,
505
+ model: 'gpt-4',
506
+ provider: 'openai',
507
+ });
508
+
509
+ expect(result[0].content).toBe('Hello there, no variables here');
510
+ });
511
+
512
+ it('should process placeholder variables combined with other processors', async () => {
513
+ isServerMode = true;
514
+ vi.spyOn(helpers, 'isCanUseVision').mockReturnValue(true);
515
+
516
+ const messages: ChatMessage[] = [
517
+ {
518
+ role: 'user',
519
+ content: 'Hello {{username}}, check this image from {{date}}',
520
+ imageList: [
521
+ {
522
+ id: 'img1',
523
+ url: 'http://example.com/test.jpg',
524
+ alt: 'test image',
525
+ },
526
+ ],
527
+ createdAt: Date.now(),
528
+ id: 'test-combined',
529
+ meta: {},
530
+ updatedAt: Date.now(),
531
+ },
532
+ ];
533
+
534
+ const result = await contextEngineering({
535
+ messages,
536
+ model: 'gpt-4o',
537
+ provider: 'openai',
538
+ });
539
+
540
+ expect(Array.isArray(result[0].content)).toBe(true);
541
+ const content = result[0].content as any[];
542
+
543
+ // Should contain processed placeholder variables in the text content
544
+ expect(content[0].text).toContain('Hello TestUser, check this image from 2023-12-25');
545
+
546
+ // Should also contain file context from MessageContentProcessor
547
+ expect(content[0].text).toContain('SYSTEM CONTEXT');
548
+
549
+ // Should contain image from vision processing
550
+ expect(content[1].type).toBe('image_url');
551
+ expect(content[1].image_url.url).toBe('http://example.com/test.jpg');
552
+
553
+ isServerMode = false;
554
+ });
555
+ });
556
+
557
+ describe('Message preprocessing processors', () => {
558
+ it('should truncate message history when enabled', async () => {
559
+ const messages: ChatMessage[] = [
560
+ {
561
+ role: 'user',
562
+ content: 'Message 1',
563
+ createdAt: Date.now(),
564
+ id: 'test-1',
565
+ meta: {},
566
+ updatedAt: Date.now(),
567
+ },
568
+ {
569
+ role: 'assistant',
570
+ content: 'Response 1',
571
+ createdAt: Date.now(),
572
+ id: 'test-2',
573
+ meta: {},
574
+ updatedAt: Date.now(),
575
+ },
576
+ {
577
+ role: 'user',
578
+ content: 'Message 2',
579
+ createdAt: Date.now(),
580
+ id: 'test-3',
581
+ meta: {},
582
+ updatedAt: Date.now(),
583
+ },
584
+ {
585
+ role: 'assistant',
586
+ content: 'Response 2',
587
+ createdAt: Date.now(),
588
+ id: 'test-4',
589
+ meta: {},
590
+ updatedAt: Date.now(),
591
+ },
592
+ {
593
+ role: 'user',
594
+ content: 'Latest message',
595
+ createdAt: Date.now(),
596
+ id: 'test-5',
597
+ meta: {},
598
+ updatedAt: Date.now(),
599
+ },
600
+ ];
601
+
602
+ const result = await contextEngineering({
603
+ messages,
604
+ model: 'gpt-4',
605
+ provider: 'openai',
606
+ enableHistoryCount: true,
607
+ historyCount: 4, // Should keep last 4 messages
608
+ });
609
+
610
+ // Should only keep the last 4 messages
611
+ expect(result).toHaveLength(4);
612
+ expect(result).toEqual([
613
+ { content: 'Response 1', role: 'assistant' },
614
+ { content: 'Message 2', role: 'user' },
615
+ { content: 'Response 2', role: 'assistant' },
616
+ { content: 'Latest message', role: 'user' },
617
+ ]);
618
+ });
619
+
620
+ it('should apply input template to user messages', async () => {
621
+ const messages: ChatMessage[] = [
622
+ {
623
+ role: 'user',
624
+ content: 'Original user input',
625
+ createdAt: Date.now(),
626
+ id: 'test-template',
627
+ meta: {},
628
+ updatedAt: Date.now(),
629
+ },
630
+ {
631
+ role: 'assistant',
632
+ content: 'Assistant response',
633
+ createdAt: Date.now(),
634
+ id: 'test-assistant',
635
+ meta: {},
636
+ updatedAt: Date.now(),
637
+ },
638
+ ];
639
+
640
+ const result = await contextEngineering({
641
+ messages,
642
+ model: 'gpt-4',
643
+ provider: 'openai',
644
+ inputTemplate: 'Template: {{text}} - End',
645
+ });
646
+
647
+ // Should apply template to user message only
648
+ expect(result).toEqual([
649
+ {
650
+ content: 'Template: Original user input - End',
651
+ role: 'user',
652
+ },
653
+ {
654
+ role: 'assistant',
655
+ content: 'Assistant response',
656
+ },
657
+ ]);
658
+ expect(result[1].content).toBe('Assistant response'); // Unchanged
659
+ });
660
+
661
+ it('should inject system role at the beginning', async () => {
662
+ const messages: ChatMessage[] = [
663
+ {
664
+ role: 'user',
665
+ content: 'User message',
666
+ createdAt: Date.now(),
667
+ id: 'test-user',
668
+ meta: {},
669
+ updatedAt: Date.now(),
670
+ },
671
+ ];
672
+
673
+ const result = await contextEngineering({
674
+ messages,
675
+ model: 'gpt-4',
676
+ provider: 'openai',
677
+ systemRole: 'You are a helpful assistant.',
678
+ });
679
+
680
+ // Should have system role at the beginning
681
+ expect(result).toEqual([
682
+ { content: 'You are a helpful assistant.', role: 'system' },
683
+ { content: 'User message', role: 'user' },
684
+ ]);
685
+ });
686
+
687
+ it('should combine all preprocessing steps correctly', async () => {
688
+ const messages: ChatMessage[] = [
689
+ {
690
+ role: 'user',
691
+ content: 'Old message 1',
692
+ createdAt: Date.now(),
693
+ id: 'test-old-1',
694
+ meta: {},
695
+ updatedAt: Date.now(),
696
+ },
697
+ {
698
+ role: 'assistant',
699
+ content: 'Old response',
700
+ createdAt: Date.now(),
701
+ id: 'test-old-2',
702
+ meta: {},
703
+ updatedAt: Date.now(),
704
+ },
705
+ {
706
+ role: 'user',
707
+ content: 'Recent input with {{username}}',
708
+ createdAt: Date.now(),
709
+ id: 'test-recent',
710
+ meta: {},
711
+ updatedAt: Date.now(),
712
+ },
713
+ ];
714
+
715
+ const result = await contextEngineering({
716
+ messages,
717
+ model: 'gpt-4',
718
+ provider: 'openai',
719
+ systemRole: 'System instructions.',
720
+ inputTemplate: 'Processed: {{text}}',
721
+ enableHistoryCount: true,
722
+ historyCount: 2, // Should keep last 2 messages
723
+ });
724
+
725
+ // System role should be first
726
+ expect(result).toEqual([
727
+ {
728
+ content: 'System instructions.',
729
+ role: 'system',
730
+ },
731
+ {
732
+ role: 'assistant',
733
+ content: 'Old response',
734
+ },
735
+ {
736
+ content: 'Processed: Recent input with TestUser',
737
+ role: 'user',
738
+ },
739
+ ]);
740
+ });
741
+
742
+ it('should skip preprocessing when no configuration is provided', async () => {
743
+ const messages: ChatMessage[] = [
744
+ {
745
+ role: 'user',
746
+ content: 'Simple message',
747
+ createdAt: Date.now(),
748
+ id: 'test-simple',
749
+ meta: {},
750
+ updatedAt: Date.now(),
751
+ },
752
+ ];
753
+
754
+ const result = await contextEngineering({
755
+ messages,
756
+ model: 'gpt-4',
757
+ provider: 'openai',
758
+ });
759
+
760
+ // Should pass message unchanged
761
+ expect(result).toEqual([
762
+ {
763
+ content: 'Simple message',
764
+ role: 'user',
765
+ },
766
+ ]);
767
+ });
768
+
769
+ it('should handle history truncation with system role injection correctly', async () => {
770
+ const messages: ChatMessage[] = [
771
+ {
772
+ role: 'user',
773
+ content: 'Message 1',
774
+ createdAt: Date.now(),
775
+ id: 'test-1',
776
+ meta: {},
777
+ updatedAt: Date.now(),
778
+ },
779
+ {
780
+ role: 'user',
781
+ content: 'Message 2',
782
+ createdAt: Date.now(),
783
+ id: 'test-2',
784
+ meta: {},
785
+ updatedAt: Date.now(),
786
+ },
787
+ {
788
+ role: 'user',
789
+ content: 'Message 3',
790
+ createdAt: Date.now(),
791
+ id: 'test-3',
792
+ meta: {},
793
+ updatedAt: Date.now(),
794
+ },
795
+ ];
796
+
797
+ const result = await contextEngineering({
798
+ messages,
799
+ model: 'gpt-4',
800
+ provider: 'openai',
801
+ systemRole: 'System role here.',
802
+ enableHistoryCount: true,
803
+ historyCount: 1, // Should keep only 1 message
804
+ });
805
+
806
+ // Should have system role + 1 truncated message
807
+ expect(result).toEqual([
808
+ {
809
+ content: 'System role here.',
810
+ role: 'system',
811
+ },
812
+ {
813
+ content: 'Message 3', // Only the last message should remain
814
+ role: 'user',
815
+ },
816
+ ]);
817
+ });
818
+
819
+ it('should handle input template compilation errors gracefully', async () => {
820
+ const messages: ChatMessage[] = [
821
+ {
822
+ role: 'user',
823
+ content: 'User message',
824
+ createdAt: Date.now(),
825
+ id: 'test-error',
826
+ meta: {},
827
+ updatedAt: Date.now(),
828
+ },
829
+ ];
830
+
831
+ // This should not throw an error, but handle it gracefully
832
+ const result = await contextEngineering({
833
+ messages,
834
+ model: 'gpt-4',
835
+ provider: 'openai',
836
+ inputTemplate: '<%- invalid javascript syntax %>',
837
+ });
838
+
839
+ // Should keep original message when template fails
840
+ expect(result).toEqual([
841
+ {
842
+ content: 'User message',
843
+ role: 'user',
844
+ },
845
+ ]);
846
+ });
847
+ });
848
+ });