converse-mcp-server 2.3.1 → 2.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +771 -738
- package/docs/API.md +10 -1
- package/docs/PROVIDERS.md +8 -4
- package/package.json +12 -12
- package/src/async/asyncJobStore.js +82 -52
- package/src/async/eventBus.js +25 -20
- package/src/async/fileCache.js +121 -40
- package/src/async/jobRunner.js +65 -39
- package/src/async/providerStreamNormalizer.js +203 -117
- package/src/config.js +374 -102
- package/src/continuationStore.js +32 -24
- package/src/index.js +45 -25
- package/src/prompts/helpPrompt.js +328 -305
- package/src/providers/anthropic.js +303 -119
- package/src/providers/codex.js +103 -45
- package/src/providers/deepseek.js +24 -8
- package/src/providers/google.js +337 -93
- package/src/providers/index.js +1 -1
- package/src/providers/interface.js +16 -11
- package/src/providers/mistral.js +179 -69
- package/src/providers/openai-compatible.js +231 -94
- package/src/providers/openai.js +1094 -914
- package/src/providers/openrouter-endpoints-client.js +220 -216
- package/src/providers/openrouter.js +426 -381
- package/src/providers/xai.js +153 -56
- package/src/resources/helpResource.js +70 -67
- package/src/router.js +95 -67
- package/src/services/summarizationService.js +51 -24
- package/src/systemPrompts.js +89 -89
- package/src/tools/cancelJob.js +31 -19
- package/src/tools/chat.js +997 -883
- package/src/tools/checkStatus.js +86 -65
- package/src/tools/consensus.js +400 -234
- package/src/tools/index.js +39 -16
- package/src/transport/httpTransport.js +82 -55
- package/src/utils/contextProcessor.js +54 -37
- package/src/utils/errorHandler.js +95 -45
- package/src/utils/fileValidator.js +107 -98
- package/src/utils/formatStatus.js +122 -64
- package/src/utils/logger.js +459 -449
- package/src/utils/pathUtils.js +2 -2
- package/src/utils/tokenLimiter.js +216 -216
package/src/tools/chat.js
CHANGED
|
@@ -1,883 +1,997 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Chat Tool
|
|
3
|
-
*
|
|
4
|
-
* Single-provider conversational AI with context and continuation support.
|
|
5
|
-
* Handles context processing, provider calls, and state management.
|
|
6
|
-
*/
|
|
7
|
-
|
|
8
|
-
import { createToolResponse, createToolError } from './index.js';
|
|
9
|
-
import {
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
import {
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
},
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
const
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
}
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
}
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
//
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
if (
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
}
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
}
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
}
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
const
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
//
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
}
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
}
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
return '
|
|
442
|
-
}
|
|
443
|
-
|
|
444
|
-
//
|
|
445
|
-
if (
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
}
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
//
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
1
|
+
/**
|
|
2
|
+
* Chat Tool
|
|
3
|
+
*
|
|
4
|
+
* Single-provider conversational AI with context and continuation support.
|
|
5
|
+
* Handles context processing, provider calls, and state management.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { createToolResponse, createToolError } from './index.js';
|
|
9
|
+
import {
|
|
10
|
+
processUnifiedContext,
|
|
11
|
+
createFileContext,
|
|
12
|
+
} from '../utils/contextProcessor.js';
|
|
13
|
+
import {
|
|
14
|
+
generateContinuationId,
|
|
15
|
+
addMessageToHistory,
|
|
16
|
+
} from '../continuationStore.js';
|
|
17
|
+
import { debugLog, debugError } from '../utils/console.js';
|
|
18
|
+
import { createLogger } from '../utils/logger.js';
|
|
19
|
+
import { CHAT_PROMPT } from '../systemPrompts.js';
|
|
20
|
+
import { applyTokenLimit, getTokenLimit } from '../utils/tokenLimiter.js';
|
|
21
|
+
import { validateAllPaths } from '../utils/fileValidator.js';
|
|
22
|
+
import { SummarizationService } from '../services/summarizationService.js';
|
|
23
|
+
|
|
24
|
+
const logger = createLogger('chat');
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* Chat tool implementation
|
|
28
|
+
* @param {object} args - Tool arguments
|
|
29
|
+
* @param {object} dependencies - Injected dependencies (config, providers, continuationStore)
|
|
30
|
+
* @returns {object} MCP tool response
|
|
31
|
+
*/
|
|
32
|
+
export async function chatTool(args, dependencies) {
|
|
33
|
+
try {
|
|
34
|
+
const {
|
|
35
|
+
config,
|
|
36
|
+
providers,
|
|
37
|
+
continuationStore,
|
|
38
|
+
contextProcessor,
|
|
39
|
+
jobRunner,
|
|
40
|
+
providerStreamNormalizer,
|
|
41
|
+
} = dependencies;
|
|
42
|
+
|
|
43
|
+
// Validate required arguments
|
|
44
|
+
if (!args.prompt || typeof args.prompt !== 'string') {
|
|
45
|
+
return createToolError('Prompt is required and must be a string');
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
// Extract and validate arguments
|
|
49
|
+
const {
|
|
50
|
+
prompt,
|
|
51
|
+
model = 'auto',
|
|
52
|
+
files = [],
|
|
53
|
+
continuation_id,
|
|
54
|
+
temperature = 0.5,
|
|
55
|
+
use_websearch = false,
|
|
56
|
+
images = [],
|
|
57
|
+
reasoning_effort = 'medium',
|
|
58
|
+
verbosity = 'medium',
|
|
59
|
+
async = false,
|
|
60
|
+
} = args;
|
|
61
|
+
|
|
62
|
+
// Handle async execution mode
|
|
63
|
+
if (async) {
|
|
64
|
+
// Validate async dependencies are available
|
|
65
|
+
if (!jobRunner || !providerStreamNormalizer) {
|
|
66
|
+
return createToolError(
|
|
67
|
+
'Async execution not available - missing async dependencies',
|
|
68
|
+
);
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
// Generate or use existing continuation ID for the conversation
|
|
72
|
+
const conversationContinuationId =
|
|
73
|
+
continuation_id || generateContinuationId();
|
|
74
|
+
|
|
75
|
+
// Get provider and model info for the job
|
|
76
|
+
const providerName = mapModelToProvider(args.model || 'auto', providers);
|
|
77
|
+
const resolvedModel =
|
|
78
|
+
providers[providerName]?.resolveModel?.(args.model) ||
|
|
79
|
+
args.model ||
|
|
80
|
+
'auto';
|
|
81
|
+
|
|
82
|
+
// Generate title early for initial response
|
|
83
|
+
const summarizationService = new SummarizationService(providers, config);
|
|
84
|
+
let title = null;
|
|
85
|
+
try {
|
|
86
|
+
title = await summarizationService.generateTitle(prompt);
|
|
87
|
+
debugLog(`Chat: Generated title for initial response - "${title}"`);
|
|
88
|
+
} catch (error) {
|
|
89
|
+
debugError(
|
|
90
|
+
'Chat: Failed to generate title for initial response',
|
|
91
|
+
error,
|
|
92
|
+
);
|
|
93
|
+
title = prompt.substring(0, 50);
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
try {
|
|
97
|
+
// Submit background job using continuation_id as the job identifier
|
|
98
|
+
const jobId = await jobRunner.submit(
|
|
99
|
+
{
|
|
100
|
+
tool: 'chat',
|
|
101
|
+
sessionId: 'local-user', // Use standard session ID
|
|
102
|
+
options: {
|
|
103
|
+
...args,
|
|
104
|
+
jobId: conversationContinuationId, // Use continuation_id as job ID
|
|
105
|
+
continuation_id: conversationContinuationId, // Pass the conversation continuation ID
|
|
106
|
+
provider: providerName, // Add provider info for status display
|
|
107
|
+
model: resolvedModel, // Add resolved model info for status display
|
|
108
|
+
title, // Pass the generated title
|
|
109
|
+
},
|
|
110
|
+
},
|
|
111
|
+
async (context) => {
|
|
112
|
+
// Execute chat in background using stream normalizer
|
|
113
|
+
return await executeChatWithStreaming(
|
|
114
|
+
args,
|
|
115
|
+
{
|
|
116
|
+
...dependencies,
|
|
117
|
+
continuationId: conversationContinuationId,
|
|
118
|
+
title, // Pass title to execution context
|
|
119
|
+
},
|
|
120
|
+
context,
|
|
121
|
+
);
|
|
122
|
+
},
|
|
123
|
+
);
|
|
124
|
+
|
|
125
|
+
// Format initial response like check_status output
|
|
126
|
+
const startTime = new Date()
|
|
127
|
+
.toLocaleString('en-GB', {
|
|
128
|
+
day: '2-digit',
|
|
129
|
+
month: '2-digit',
|
|
130
|
+
year: 'numeric',
|
|
131
|
+
hour: '2-digit',
|
|
132
|
+
minute: '2-digit',
|
|
133
|
+
second: '2-digit',
|
|
134
|
+
hour12: false,
|
|
135
|
+
})
|
|
136
|
+
.replace(',', '');
|
|
137
|
+
|
|
138
|
+
const statusLine = `⏳ SUBMITTED | CHAT | ${conversationContinuationId} | 1/1 | Started: ${startTime} | "${title || 'Processing...'}" | ${providerName}/${resolvedModel}`;
|
|
139
|
+
|
|
140
|
+
// Return formatted response with status line and continuation_id
|
|
141
|
+
return createToolResponse({
|
|
142
|
+
content: `${statusLine}\ncontinuation_id: ${conversationContinuationId}`,
|
|
143
|
+
continuation: {
|
|
144
|
+
id: conversationContinuationId, // Use continuation_id as the primary ID
|
|
145
|
+
status: 'processing',
|
|
146
|
+
},
|
|
147
|
+
async_execution: true,
|
|
148
|
+
});
|
|
149
|
+
} catch (error) {
|
|
150
|
+
logger.error('Failed to submit async chat job', { error });
|
|
151
|
+
return createToolError(`Async execution failed: ${error.message}`);
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
let conversationHistory = [];
|
|
156
|
+
let continuationId = continuation_id;
|
|
157
|
+
|
|
158
|
+
// Load existing conversation if continuation_id provided
|
|
159
|
+
if (continuationId) {
|
|
160
|
+
try {
|
|
161
|
+
const existingState = await continuationStore.get(continuationId);
|
|
162
|
+
if (existingState) {
|
|
163
|
+
conversationHistory = existingState.messages || [];
|
|
164
|
+
} else {
|
|
165
|
+
// Invalid continuation ID - start fresh with new ID
|
|
166
|
+
continuationId = generateContinuationId();
|
|
167
|
+
}
|
|
168
|
+
} catch (error) {
|
|
169
|
+
logger.error('Error loading conversation', { error });
|
|
170
|
+
// Continue with fresh conversation on error
|
|
171
|
+
continuationId = generateContinuationId();
|
|
172
|
+
}
|
|
173
|
+
} else {
|
|
174
|
+
// Generate new continuation ID for new conversation
|
|
175
|
+
continuationId = generateContinuationId();
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
// Validate file paths before processing
|
|
179
|
+
if (files.length > 0 || images.length > 0) {
|
|
180
|
+
const validation = await validateAllPaths(
|
|
181
|
+
{ files, images },
|
|
182
|
+
{ clientCwd: config.server?.client_cwd },
|
|
183
|
+
);
|
|
184
|
+
if (!validation.valid) {
|
|
185
|
+
logger.error('File validation failed', { errors: validation.errors });
|
|
186
|
+
return validation.errorResponse;
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
// Process context (files, images, web search)
|
|
191
|
+
let contextMessage = null;
|
|
192
|
+
if (files.length > 0 || images.length > 0 || use_websearch) {
|
|
193
|
+
try {
|
|
194
|
+
const contextRequest = {
|
|
195
|
+
files: Array.isArray(files) ? files : [],
|
|
196
|
+
images: Array.isArray(images) ? images : [],
|
|
197
|
+
webSearch: use_websearch ? prompt : null,
|
|
198
|
+
};
|
|
199
|
+
|
|
200
|
+
const contextResult = await contextProcessor.processUnifiedContext(
|
|
201
|
+
contextRequest,
|
|
202
|
+
{
|
|
203
|
+
enforceSecurityCheck: false, // Allow files from any location
|
|
204
|
+
skipSecurityCheck: true, // Legacy flag for backward compatibility
|
|
205
|
+
clientCwd: config.server?.client_cwd, // Use auto-detected client working directory
|
|
206
|
+
},
|
|
207
|
+
);
|
|
208
|
+
|
|
209
|
+
// Create context message from files and images
|
|
210
|
+
const allProcessedFiles = [
|
|
211
|
+
...contextResult.files,
|
|
212
|
+
...contextResult.images,
|
|
213
|
+
];
|
|
214
|
+
if (allProcessedFiles.length > 0) {
|
|
215
|
+
contextMessage = createFileContext(allProcessedFiles, {
|
|
216
|
+
includeMetadata: true,
|
|
217
|
+
includeErrors: true,
|
|
218
|
+
});
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
// Add web search results if available (placeholder for now)
|
|
222
|
+
if (contextResult.webSearch && !contextResult.webSearch.placeholder) {
|
|
223
|
+
// Future implementation: add web search results to context
|
|
224
|
+
logger.debug('Web search results available but not yet implemented');
|
|
225
|
+
}
|
|
226
|
+
} catch (error) {
|
|
227
|
+
logger.error('Error processing context', { error });
|
|
228
|
+
// Continue without context if processing fails
|
|
229
|
+
}
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
// Build message array for provider
|
|
233
|
+
const messages = [];
|
|
234
|
+
|
|
235
|
+
// Add system prompt only if not already in conversation history
|
|
236
|
+
if (
|
|
237
|
+
conversationHistory.length === 0 ||
|
|
238
|
+
conversationHistory[0].role !== 'system'
|
|
239
|
+
) {
|
|
240
|
+
messages.push({
|
|
241
|
+
role: 'system',
|
|
242
|
+
content: CHAT_PROMPT,
|
|
243
|
+
});
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
// Add conversation history
|
|
247
|
+
messages.push(...conversationHistory);
|
|
248
|
+
|
|
249
|
+
// Add user prompt with context
|
|
250
|
+
const userMessage = {
|
|
251
|
+
role: 'user',
|
|
252
|
+
content: prompt, // default to simple string content
|
|
253
|
+
};
|
|
254
|
+
|
|
255
|
+
// If we have context (files/images), create complex content array
|
|
256
|
+
if (contextMessage && contextMessage.content) {
|
|
257
|
+
// Create complex content array
|
|
258
|
+
userMessage.content = [
|
|
259
|
+
...contextMessage.content, // Include all file/image parts
|
|
260
|
+
{ type: 'text', text: prompt }, // Add the user prompt as text
|
|
261
|
+
];
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
messages.push(userMessage);
|
|
265
|
+
|
|
266
|
+
// Select provider
|
|
267
|
+
let selectedProvider;
|
|
268
|
+
let providerName;
|
|
269
|
+
|
|
270
|
+
if (model === 'auto') {
|
|
271
|
+
// Auto-select first available provider
|
|
272
|
+
const availableProviders = Object.keys(providers).filter((name) => {
|
|
273
|
+
const provider = providers[name];
|
|
274
|
+
return provider && provider.isAvailable && provider.isAvailable(config);
|
|
275
|
+
});
|
|
276
|
+
|
|
277
|
+
if (availableProviders.length === 0) {
|
|
278
|
+
return createToolError(
|
|
279
|
+
'No providers available. Please configure at least one API key.',
|
|
280
|
+
);
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
providerName = availableProviders[0];
|
|
284
|
+
selectedProvider = providers[providerName];
|
|
285
|
+
} else {
|
|
286
|
+
// Use specified provider/model
|
|
287
|
+
// Try to map model to provider
|
|
288
|
+
providerName = mapModelToProvider(model, providers);
|
|
289
|
+
selectedProvider = providers[providerName];
|
|
290
|
+
|
|
291
|
+
if (!selectedProvider) {
|
|
292
|
+
return createToolError(`Provider not found for model: ${model}`);
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
if (!selectedProvider.isAvailable(config)) {
|
|
296
|
+
return createToolError(
|
|
297
|
+
`Provider ${providerName} is not available. Check API key configuration.`,
|
|
298
|
+
);
|
|
299
|
+
}
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
// Resolve model name and prepare provider options
|
|
303
|
+
const resolvedModel = resolveAutoModel(model, providerName);
|
|
304
|
+
const providerOptions = {
|
|
305
|
+
model: resolvedModel,
|
|
306
|
+
temperature,
|
|
307
|
+
reasoning_effort,
|
|
308
|
+
verbosity,
|
|
309
|
+
use_websearch,
|
|
310
|
+
config,
|
|
311
|
+
continuation_id, // Pass for thread resumption
|
|
312
|
+
continuationStore, // Pass store for state management
|
|
313
|
+
};
|
|
314
|
+
|
|
315
|
+
// Call provider
|
|
316
|
+
let response;
|
|
317
|
+
const startTime = Date.now();
|
|
318
|
+
try {
|
|
319
|
+
response = await selectedProvider.invoke(messages, providerOptions);
|
|
320
|
+
} catch (error) {
|
|
321
|
+
logger.error('Provider error', {
|
|
322
|
+
error,
|
|
323
|
+
data: { provider: providerName },
|
|
324
|
+
});
|
|
325
|
+
return createToolError(`Provider error: ${error.message}`);
|
|
326
|
+
}
|
|
327
|
+
const executionTime = (Date.now() - startTime) / 1000; // Convert to seconds
|
|
328
|
+
|
|
329
|
+
// Validate response
|
|
330
|
+
if (!response || !response.content) {
|
|
331
|
+
return createToolError('Provider returned invalid response');
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
// Add assistant response to conversation history
|
|
335
|
+
const assistantMessage = {
|
|
336
|
+
role: 'assistant',
|
|
337
|
+
content: response.content,
|
|
338
|
+
};
|
|
339
|
+
|
|
340
|
+
const updatedMessages = [...messages, assistantMessage];
|
|
341
|
+
|
|
342
|
+
// Save conversation state
|
|
343
|
+
try {
|
|
344
|
+
const conversationState = {
|
|
345
|
+
messages: updatedMessages,
|
|
346
|
+
provider: providerName,
|
|
347
|
+
model,
|
|
348
|
+
lastUpdated: Date.now(),
|
|
349
|
+
// Store Codex thread ID if available (for thread resumption)
|
|
350
|
+
codexThreadId: response.metadata?.threadId,
|
|
351
|
+
};
|
|
352
|
+
|
|
353
|
+
await continuationStore.set(continuationId, conversationState);
|
|
354
|
+
} catch (error) {
|
|
355
|
+
logger.error('Error saving conversation', { error });
|
|
356
|
+
// Continue even if save fails
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
// Create unified status line (similar to async status display)
|
|
360
|
+
const statusLine =
|
|
361
|
+
config.environment?.nodeEnv !== 'test'
|
|
362
|
+
? `✅ COMPLETED | CHAT | ${continuationId} | ${executionTime.toFixed(1)}s elapsed | ${providerName}/${resolvedModel}\n`
|
|
363
|
+
: '';
|
|
364
|
+
|
|
365
|
+
// Always include continuation_id line for clarity
|
|
366
|
+
const continuationIdLine = `continuation_id: ${continuationId}\n\n`;
|
|
367
|
+
|
|
368
|
+
const result = {
|
|
369
|
+
content: statusLine + continuationIdLine + response.content,
|
|
370
|
+
continuation: {
|
|
371
|
+
id: continuationId,
|
|
372
|
+
provider: providerName,
|
|
373
|
+
model,
|
|
374
|
+
messageCount: updatedMessages.filter((msg) => msg.role !== 'system')
|
|
375
|
+
.length,
|
|
376
|
+
},
|
|
377
|
+
};
|
|
378
|
+
|
|
379
|
+
// Add metadata if available
|
|
380
|
+
if (response.metadata) {
|
|
381
|
+
result.metadata = response.metadata;
|
|
382
|
+
}
|
|
383
|
+
|
|
384
|
+
// Apply token limiting to the final response
|
|
385
|
+
const tokenLimit = getTokenLimit(config);
|
|
386
|
+
const resultStr = JSON.stringify(result, null, 2);
|
|
387
|
+
const limitedResult = applyTokenLimit(resultStr, tokenLimit);
|
|
388
|
+
|
|
389
|
+
// Parse the limited result back to object format to preserve structure
|
|
390
|
+
let finalResult;
|
|
391
|
+
try {
|
|
392
|
+
finalResult = JSON.parse(limitedResult.content);
|
|
393
|
+
} catch (e) {
|
|
394
|
+
// Fallback if parsing fails - return original result
|
|
395
|
+
finalResult = result;
|
|
396
|
+
}
|
|
397
|
+
|
|
398
|
+
return createToolResponse(finalResult);
|
|
399
|
+
} catch (error) {
|
|
400
|
+
logger.error('Chat tool error', { error });
|
|
401
|
+
return createToolError('Chat tool failed', error);
|
|
402
|
+
}
|
|
403
|
+
}
|
|
404
|
+
|
|
405
|
+
/**
|
|
406
|
+
* Map model name to provider name
|
|
407
|
+
* @param {string} model - Model name
|
|
408
|
+
* @returns {string} Provider name
|
|
409
|
+
*/
|
|
410
|
+
/**
|
|
411
|
+
* Resolve "auto" model to default model for the provider
|
|
412
|
+
*/
|
|
413
|
+
function resolveAutoModel(model, providerName) {
|
|
414
|
+
if (model.toLowerCase() !== 'auto') {
|
|
415
|
+
return model;
|
|
416
|
+
}
|
|
417
|
+
|
|
418
|
+
const defaults = {
|
|
419
|
+
openai: 'gpt-5',
|
|
420
|
+
xai: 'grok-4-0709',
|
|
421
|
+
google: 'gemini-2.5-pro',
|
|
422
|
+
anthropic: 'claude-sonnet-4-20250514',
|
|
423
|
+
mistral: 'magistral-medium-2506',
|
|
424
|
+
deepseek: 'deepseek-reasoner',
|
|
425
|
+
openrouter: 'qwen/qwen3-coder',
|
|
426
|
+
};
|
|
427
|
+
|
|
428
|
+
return defaults[providerName] || 'gpt-5';
|
|
429
|
+
}
|
|
430
|
+
|
|
431
|
+
export function mapModelToProvider(model, providers) {
|
|
432
|
+
const modelLower = model.toLowerCase();
|
|
433
|
+
|
|
434
|
+
// Handle "auto" - default to OpenAI
|
|
435
|
+
if (modelLower === 'auto') {
|
|
436
|
+
return 'openai';
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
// Check Codex (exact match only - don't route "gpt-5-codex" etc to Codex provider)
|
|
440
|
+
if (modelLower === 'codex') {
|
|
441
|
+
return 'codex';
|
|
442
|
+
}
|
|
443
|
+
|
|
444
|
+
// Check OpenRouter-specific patterns first
|
|
445
|
+
if (
|
|
446
|
+
modelLower === 'openrouter auto' ||
|
|
447
|
+
modelLower === 'auto router' ||
|
|
448
|
+
modelLower === 'auto-router' ||
|
|
449
|
+
modelLower === 'openrouter-auto'
|
|
450
|
+
) {
|
|
451
|
+
return 'openrouter';
|
|
452
|
+
}
|
|
453
|
+
|
|
454
|
+
// If model contains "/", check if native provider supports it
|
|
455
|
+
if (modelLower.includes('/')) {
|
|
456
|
+
// Check each provider to see if they have this exact model
|
|
457
|
+
for (const [providerName, provider] of Object.entries(providers)) {
|
|
458
|
+
if (provider && provider.getModelConfig) {
|
|
459
|
+
const modelConfig = provider.getModelConfig(model);
|
|
460
|
+
if (
|
|
461
|
+
modelConfig &&
|
|
462
|
+
!modelConfig.isDynamic &&
|
|
463
|
+
!modelConfig.needsApiUpdate
|
|
464
|
+
) {
|
|
465
|
+
// Model exists in this provider's static list
|
|
466
|
+
return providerName;
|
|
467
|
+
}
|
|
468
|
+
}
|
|
469
|
+
}
|
|
470
|
+
// No native provider has this model, route to OpenRouter
|
|
471
|
+
return 'openrouter';
|
|
472
|
+
}
|
|
473
|
+
|
|
474
|
+
// For non-slash models, use keyword matching as before
|
|
475
|
+
|
|
476
|
+
// OpenAI models
|
|
477
|
+
if (
|
|
478
|
+
modelLower.includes('gpt') ||
|
|
479
|
+
modelLower.includes('o1') ||
|
|
480
|
+
modelLower.includes('o3') ||
|
|
481
|
+
modelLower.includes('o4')
|
|
482
|
+
) {
|
|
483
|
+
return 'openai';
|
|
484
|
+
}
|
|
485
|
+
|
|
486
|
+
// XAI models
|
|
487
|
+
if (modelLower.includes('grok')) {
|
|
488
|
+
return 'xai';
|
|
489
|
+
}
|
|
490
|
+
|
|
491
|
+
// Google models
|
|
492
|
+
if (
|
|
493
|
+
modelLower.includes('gemini') ||
|
|
494
|
+
modelLower.includes('flash') ||
|
|
495
|
+
modelLower.includes('pro') ||
|
|
496
|
+
modelLower === 'google'
|
|
497
|
+
) {
|
|
498
|
+
return 'google';
|
|
499
|
+
}
|
|
500
|
+
|
|
501
|
+
// Anthropic models
|
|
502
|
+
if (
|
|
503
|
+
modelLower.includes('claude') ||
|
|
504
|
+
modelLower.includes('opus') ||
|
|
505
|
+
modelLower.includes('sonnet') ||
|
|
506
|
+
modelLower.includes('haiku')
|
|
507
|
+
) {
|
|
508
|
+
return 'anthropic';
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
// Mistral models
|
|
512
|
+
if (modelLower.includes('mistral') || modelLower.includes('magistral')) {
|
|
513
|
+
return 'mistral';
|
|
514
|
+
}
|
|
515
|
+
|
|
516
|
+
// DeepSeek models
|
|
517
|
+
if (
|
|
518
|
+
modelLower.includes('deepseek') ||
|
|
519
|
+
modelLower === 'reasoner' ||
|
|
520
|
+
modelLower === 'r1' ||
|
|
521
|
+
modelLower === 'chat'
|
|
522
|
+
) {
|
|
523
|
+
return 'deepseek';
|
|
524
|
+
}
|
|
525
|
+
|
|
526
|
+
// OpenRouter models (specific model patterns)
|
|
527
|
+
if (
|
|
528
|
+
modelLower.includes('qwen') ||
|
|
529
|
+
modelLower.includes('kimi') ||
|
|
530
|
+
modelLower.includes('moonshot') ||
|
|
531
|
+
modelLower === 'k2'
|
|
532
|
+
) {
|
|
533
|
+
return 'openrouter';
|
|
534
|
+
}
|
|
535
|
+
|
|
536
|
+
// Default fallback
|
|
537
|
+
return 'openai';
|
|
538
|
+
}
|
|
539
|
+
|
|
540
|
+
/**
 * Execute chat with streaming normalization for async execution.
 *
 * End-to-end flow: resolve a title (passed or generated), load prior
 * conversation state for the continuation ID, validate and process file /
 * image / web-search context, build the provider message array, select a
 * provider for the requested model, invoke it (streaming when running as a
 * background job, i.e. context.jobId is set), persist the updated
 * conversation, and return the completed result.
 *
 * @param {object} args - Original chat arguments (prompt, model, files,
 *   temperature, use_websearch, images, reasoning_effort, verbosity)
 * @param {object} dependencies - Dependencies with continuationId: config,
 *   providers map, continuationStore, contextProcessor,
 *   providerStreamNormalizer, continuationId, and optional pre-computed title
 * @param {object} context - Job execution context; may carry jobId,
 *   an AbortSignal as `signal`, and an async `updateJob` callback
 * @returns {Promise<object>} Complete chat result: content, optional
 *   title/summary, continuation info, and metadata
 * @throws {Error} On file validation failure, no available provider,
 *   unknown/unavailable provider for the model, cancellation, streaming
 *   errors, or an invalid (empty-content) provider response
 */
async function executeChatWithStreaming(args, dependencies, context) {
  const {
    config,
    providers,
    continuationStore,
    contextProcessor,
    providerStreamNormalizer,
    continuationId,
    title: passedTitle, // Title passed from initial submission
  } = dependencies;

  const {
    prompt,
    model = 'auto',
    files = [],
    temperature = 0.5,
    use_websearch = false,
    images = [],
    reasoning_effort = 'medium',
    verbosity = 'medium',
  } = args;

  // Initialize SummarizationService (used for title + final summary)
  const summarizationService = new SummarizationService(providers, config);

  // Use passed title or generate if not provided
  let title = passedTitle;
  if (!title) {
    try {
      title = await summarizationService.generateTitle(prompt);
      debugLog(`Chat: Generated title - "${title}"`);
    } catch (error) {
      debugError('Chat: Failed to generate title', error);
      // Continue without title if generation fails
    }
  } else {
    debugLog(`Chat: Using passed title - "${title}"`);
  }

  let conversationHistory = [];

  // Load existing conversation if continuation_id provided
  if (continuationId) {
    try {
      const existingState = await continuationStore.get(continuationId);
      if (existingState) {
        conversationHistory = existingState.messages || [];
      }
    } catch (error) {
      logger.error('Error loading conversation', { error });
      // Continue with fresh conversation on error
    }
  }

  // Validate file paths before processing; a hard failure here aborts the
  // job rather than silently dropping context the user asked for.
  if (files.length > 0 || images.length > 0) {
    const validation = await validateAllPaths(
      { files, images },
      { clientCwd: config.server?.client_cwd },
    );
    if (!validation.valid) {
      logger.error('File validation failed', { errors: validation.errors });
      throw new Error(
        `File validation failed: ${validation.errors.join(', ')}`,
      );
    }
  }

  // Process context (files, images, web search). Failures here are
  // best-effort: the chat proceeds without the extra context.
  let contextMessage = null;
  if (files.length > 0 || images.length > 0 || use_websearch) {
    try {
      const contextRequest = {
        files: Array.isArray(files) ? files : [],
        images: Array.isArray(images) ? images : [],
        webSearch: use_websearch ? prompt : null,
      };

      const contextResult = await contextProcessor.processUnifiedContext(
        contextRequest,
        {
          // Paths were already validated by validateAllPaths above
          enforceSecurityCheck: false,
          skipSecurityCheck: true,
          clientCwd: config.server?.client_cwd,
        },
      );

      // Create context message from files and images
      const allProcessedFiles = [
        ...contextResult.files,
        ...contextResult.images,
      ];
      if (allProcessedFiles.length > 0) {
        contextMessage = createFileContext(allProcessedFiles, {
          includeMetadata: true,
          includeErrors: true,
        });
      }
    } catch (error) {
      logger.error('Error processing context', { error });
      // Continue without context if processing fails
    }
  }

  // Build message array for provider
  const messages = [];

  // Add system prompt only if not already in conversation history
  if (
    conversationHistory.length === 0 ||
    conversationHistory[0].role !== 'system'
  ) {
    messages.push({
      role: 'system',
      content: CHAT_PROMPT,
    });
  }

  // Add conversation history
  messages.push(...conversationHistory);

  // Add user prompt with context
  const userMessage = {
    role: 'user',
    content: prompt,
  };

  // If we have context (files/images), create complex content array
  if (contextMessage && contextMessage.content) {
    userMessage.content = [
      ...contextMessage.content,
      { type: 'text', text: prompt },
    ];
  }

  messages.push(userMessage);

  // Select provider
  let selectedProvider;
  let providerName;

  if (model === 'auto') {
    // Auto-select first available provider (Object.keys order — i.e. the
    // registration order of the providers map)
    const availableProviders = Object.keys(providers).filter((name) => {
      const provider = providers[name];
      return provider && provider.isAvailable && provider.isAvailable(config);
    });

    if (availableProviders.length === 0) {
      throw new Error(
        'No providers available. Please configure at least one API key.',
      );
    }

    providerName = availableProviders[0];
    selectedProvider = providers[providerName];
  } else {
    // Use specified provider/model
    providerName = mapModelToProvider(model, providers);
    selectedProvider = providers[providerName];

    if (!selectedProvider) {
      throw new Error(`Provider not found for model: ${model}`);
    }

    if (!selectedProvider.isAvailable(config)) {
      throw new Error(
        `Provider ${providerName} is not available. Check API key configuration.`,
      );
    }
  }

  // Resolve model name and prepare provider options
  const resolvedModel = resolveAutoModel(model, providerName);
  const providerOptions = {
    model: resolvedModel,
    temperature,
    reasoning_effort,
    verbosity,
    use_websearch,
    config,
    continuation_id: continuationId, // Pass for thread resumption
    continuationStore, // Pass store for state management
  };

  // For streaming, add the stream flag and signal separately
  const streamingOptions = {
    ...providerOptions,
    stream: true,
    signal: context?.signal, // Pass AbortSignal for cancellation support
  };

  // Check if provider supports streaming (by checking if invoke can return a stream)
  let response;
  const startTime = Date.now();

  // Always use streaming for async execution in background
  if (context?.jobId) {
    // Use streaming with normalization
    debugLog(`Chat: Using streaming for provider ${providerName}`);

    const stream = await selectedProvider.invoke(messages, streamingOptions);
    const normalizedStream = providerStreamNormalizer.normalize(
      providerName,
      stream,
      {
        model: resolvedModel,
        requestId: context.jobId,
      },
    );

    // Process normalized stream and build final response
    let accumulatedContent = '';
    let finalUsage = null;
    let finalMetadata = {};

    for await (const event of normalizedStream) {
      // Check for cancellation. BUGFIX: the signal is optional (see
      // streamingOptions above, which uses context?.signal) — guard with
      // optional chaining so a job context without an AbortSignal does not
      // throw a TypeError here.
      if (context.signal?.aborted) {
        throw new Error('Chat execution was cancelled');
      }

      switch (event.type) {
        case 'start':
          // Update job with streaming started status, provider info, and title
          await context.updateJob({
            status: 'running',
            provider: providerName,
            model: resolvedModel,
            title: title || undefined, // Include title if generated
            progress: {
              phase: 'streaming_started',
              provider: providerName,
              model: resolvedModel,
            },
          });
          break;

        case 'delta':
          accumulatedContent += event.data.textDelta;
          // Update job with progress and full accumulated content
          await context.updateJob({
            accumulated_content: accumulatedContent, // Store full content
            progress: {
              phase: 'streaming',
              provider: providerName,
              model: resolvedModel,
              content_length: accumulatedContent.length,
            },
          });
          break;

        case 'reasoning_summary':
          // Update job with reasoning summary
          debugLog(
            `[Chat] *** UPDATING JOB WITH REASONING: "${event.data.content?.substring(0, 100)}..."`,
          );
          await context.updateJob({
            reasoning_summary: event.data.content,
          });
          break;

        case 'usage':
          finalUsage = event.data.usage;
          break;

        case 'end':
          // Prefer the provider's final values, fall back to accumulated state
          accumulatedContent = event.data.content || accumulatedContent;
          finalUsage = event.data.usage || finalUsage;
          finalMetadata = event.data.metadata || finalMetadata;
          break;

        case 'error':
          throw new Error(`Streaming error: ${event.data.error.message}`);
      }
    }

    response = {
      content: accumulatedContent,
      metadata: {
        ...finalMetadata,
        usage: finalUsage,
        streaming: true,
      },
    };
  } else {
    // Fall back to regular invoke
    debugLog(`Chat: Using regular invoke for provider ${providerName}`);
    response = await selectedProvider.invoke(messages, providerOptions);
  }

  // Wall-clock execution time in seconds
  const executionTime = (Date.now() - startTime) / 1000;

  // Validate response
  if (!response || !response.content) {
    throw new Error('Provider returned invalid response');
  }

  // Store reasoning summary from OpenAI if available
  if (
    response.metadata?.usage?.reasoning_summary &&
    context &&
    context.updateJob
  ) {
    try {
      await context.updateJob({
        reasoning_summary: response.metadata.usage.reasoning_summary,
      });
      debugLog('Chat: Stored reasoning summary');
    } catch (error) {
      debugError('Chat: Failed to store reasoning summary', error);
    }
  }

  // Generate final summary for responses longer than 100 characters (non-blocking)
  let finalSummary = null;
  if (response.content && response.content.length > 100) {
    try {
      finalSummary = await summarizationService.generateFinalSummary(
        response.content,
      );
      debugLog(`Chat: Generated final summary - "${finalSummary}"`);
      // Store final summary in job
      if (finalSummary && context && context.updateJob) {
        await context.updateJob({
          final_summary: finalSummary,
        });
      }
    } catch (error) {
      debugError('Chat: Failed to generate final summary', error);
      // Continue without summary if generation fails
    }
  }

  // Add assistant response to conversation history
  const assistantMessage = {
    role: 'assistant',
    content: response.content,
  };

  const updatedMessages = [...messages, assistantMessage];

  // Save conversation state
  try {
    const conversationState = {
      messages: updatedMessages,
      provider: providerName,
      model,
      lastUpdated: Date.now(),
      // Store Codex thread ID if available (for thread resumption)
      codexThreadId: response.metadata?.threadId,
    };

    await continuationStore.set(continuationId, conversationState);
  } catch (error) {
    logger.error('Error saving conversation', { error });
    // Continue even if save fails
  }

  // Return complete result for job completion
  return {
    content: response.content,
    title: title || undefined, // Include title if generated
    summary: finalSummary || undefined, // Include summary if generated
    continuation: {
      id: continuationId,
      provider: providerName,
      model,
      // Count only user/assistant turns, not the system prompt
      messageCount: updatedMessages.filter((msg) => msg.role !== 'system')
        .length,
    },
    metadata: {
      provider: providerName,
      model: resolvedModel,
      execution_time: executionTime,
      async_execution: true,
      ...response.metadata,
    },
  };
}
|
|
927
|
+
|
|
928
|
+
// Tool metadata
// Human-readable summary surfaced to MCP clients when they list tools.
chatTool.description =
  'GENERAL CHAT & COLLABORATIVE THINKING - Development assistance, brainstorming, code analysis. Supports files, images, continuation_id for multi-turn conversations. Use model: "auto" for automatic selection.';
// JSON Schema describing the arguments the chat tool accepts (MCP inputSchema).
// Only `prompt` is required; everything else has a documented default.
chatTool.inputSchema = {
  type: 'object',
  properties: {
    // Model selector string; "auto" defers model choice to the server.
    model: {
      type: 'string',
      description:
        'AI model to use. Examples: "auto" (recommended), "gpt-5", "gemini-2.5-pro", "grok-4-0709". Defaults to auto-selection.',
    },
    // Text files to inline as conversation context.
    files: {
      type: 'array',
      items: { type: 'string' },
      description:
        'File paths to include as context (absolute or relative paths). Example: ["C:\\Users\\username\\project\\src\\auth.js", "./config.json"]',
    },
    // Image inputs: filesystem paths or base64 data URIs.
    images: {
      type: 'array',
      items: { type: 'string' },
      description:
        'Image paths for visual context (absolute or relative paths, or base64 data). Example: ["C:\\Users\\username\\diagram.png", "./screenshot.jpg", "data:image/jpeg;base64,/9j/4AAQ..."]',
    },
    // Resumes a stored conversation thread when supplied.
    continuation_id: {
      type: 'string',
      description:
        'Continuation ID for persistent conversation. Example: "chat_1703123456789_abc123"',
    },
    // Sampling temperature, clamped to [0.0, 1.0] by the schema.
    temperature: {
      type: 'number',
      description:
        'Response randomness (0.0-1.0). Examples: 0.2 (focused), 0.5 (balanced), 0.8 (creative). Default: 0.5',
      minimum: 0.0,
      maximum: 1.0,
      default: 0.5,
    },
    // Reasoning depth knob for models that support configurable thinking.
    reasoning_effort: {
      type: 'string',
      enum: ['none', 'minimal', 'low', 'medium', 'high', 'max'],
      description:
        'Reasoning depth for thinking models. Examples: "none" (no reasoning, fastest - GPT-5.1+ only), "minimal" (few reasoning tokens), "low" (light analysis), "medium" (balanced), "high" (complex analysis). Default: "medium"',
      default: 'medium',
    },
    // Output length/verbosity preference (GPT-5 family per the description).
    verbosity: {
      type: 'string',
      enum: ['low', 'medium', 'high'],
      description:
        'Output verbosity for GPT-5 models. Examples: "low" (concise answers), "medium" (balanced), "high" (thorough explanations). Default: "medium"',
      default: 'medium',
    },
    // Opt-in web search augmentation; off by default.
    use_websearch: {
      type: 'boolean',
      description:
        'Enable web search for current information. Example: true for recent developments or up to date documentation. Default: false',
      default: false,
    },
    // Background-execution flag: true returns a continuation_id immediately.
    async: {
      type: 'boolean',
      description:
        'Execute chat in background. When true, returns continuation_id immediately and processes request asynchronously. Default: false',
      default: false,
    },
    // The user's question/topic — the only required field.
    prompt: {
      type: 'string',
      description:
        'Your question or topic with relevant context. More detail enables better responses. Example: "How should I structure the authentication module for this Express.js API?"',
    },
  },
  required: ['prompt'],
};
|