converse-mcp-server 2.3.1 → 2.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +771 -738
- package/docs/API.md +10 -1
- package/docs/PROVIDERS.md +8 -4
- package/package.json +12 -12
- package/src/async/asyncJobStore.js +82 -52
- package/src/async/eventBus.js +25 -20
- package/src/async/fileCache.js +121 -40
- package/src/async/jobRunner.js +65 -39
- package/src/async/providerStreamNormalizer.js +203 -117
- package/src/config.js +374 -102
- package/src/continuationStore.js +32 -24
- package/src/index.js +45 -25
- package/src/prompts/helpPrompt.js +328 -305
- package/src/providers/anthropic.js +303 -119
- package/src/providers/codex.js +103 -45
- package/src/providers/deepseek.js +24 -8
- package/src/providers/google.js +337 -93
- package/src/providers/index.js +1 -1
- package/src/providers/interface.js +16 -11
- package/src/providers/mistral.js +179 -69
- package/src/providers/openai-compatible.js +231 -94
- package/src/providers/openai.js +1094 -914
- package/src/providers/openrouter-endpoints-client.js +220 -216
- package/src/providers/openrouter.js +426 -381
- package/src/providers/xai.js +153 -56
- package/src/resources/helpResource.js +70 -67
- package/src/router.js +95 -67
- package/src/services/summarizationService.js +51 -24
- package/src/systemPrompts.js +89 -89
- package/src/tools/cancelJob.js +31 -19
- package/src/tools/chat.js +997 -883
- package/src/tools/checkStatus.js +86 -65
- package/src/tools/consensus.js +400 -234
- package/src/tools/index.js +39 -16
- package/src/transport/httpTransport.js +82 -55
- package/src/utils/contextProcessor.js +54 -37
- package/src/utils/errorHandler.js +95 -45
- package/src/utils/fileValidator.js +107 -98
- package/src/utils/formatStatus.js +122 -64
- package/src/utils/logger.js +459 -449
- package/src/utils/pathUtils.js +2 -2
- package/src/utils/tokenLimiter.js +216 -216
package/src/providers/openai.js
CHANGED
|
@@ -1,914 +1,1094 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* OpenAI Provider
|
|
3
|
-
*
|
|
4
|
-
* Provider implementation for OpenAI GPT models using the official OpenAI SDK v5.
|
|
5
|
-
* Implements the unified interface: async invoke(messages, options) => { content, stop_reason, rawResponse }
|
|
6
|
-
*/
|
|
7
|
-
|
|
8
|
-
import OpenAI from 'openai';
|
|
9
|
-
import { debugLog, debugError } from '../utils/console.js';
|
|
10
|
-
|
|
11
|
-
// Define supported models with their capabilities
|
|
12
|
-
const SUPPORTED_MODELS = {
|
|
13
|
-
'gpt-5.1': {
|
|
14
|
-
modelName: 'gpt-5.1',
|
|
15
|
-
friendlyName: 'OpenAI (GPT-5.1)',
|
|
16
|
-
contextWindow: 400000,
|
|
17
|
-
maxOutputTokens: 128000,
|
|
18
|
-
supportsStreaming: true,
|
|
19
|
-
supportsImages: true,
|
|
20
|
-
supportsTemperature: false,
|
|
21
|
-
supportsWebSearch: true,
|
|
22
|
-
supportsResponsesAPI: true,
|
|
23
|
-
supportsNoneReasoningEffort: true,
|
|
24
|
-
timeout: 3600000, // 1 hour
|
|
25
|
-
description:
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
if (
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
if (
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
//
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Provider
|
|
3
|
+
*
|
|
4
|
+
* Provider implementation for OpenAI GPT models using the official OpenAI SDK v5.
|
|
5
|
+
* Implements the unified interface: async invoke(messages, options) => { content, stop_reason, rawResponse }
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import OpenAI from 'openai';
|
|
9
|
+
import { debugLog, debugError } from '../utils/console.js';
|
|
10
|
+
|
|
11
|
+
// Define supported models with their capabilities
|
|
12
|
+
const SUPPORTED_MODELS = {
|
|
13
|
+
'gpt-5.1': {
|
|
14
|
+
modelName: 'gpt-5.1',
|
|
15
|
+
friendlyName: 'OpenAI (GPT-5.1)',
|
|
16
|
+
contextWindow: 400000,
|
|
17
|
+
maxOutputTokens: 128000,
|
|
18
|
+
supportsStreaming: true,
|
|
19
|
+
supportsImages: true,
|
|
20
|
+
supportsTemperature: false, // GPT-5 doesn't support temperature
|
|
21
|
+
supportsWebSearch: true,
|
|
22
|
+
supportsResponsesAPI: true,
|
|
23
|
+
supportsNoneReasoningEffort: true, // GPT-5.1 supports "none" for faster responses
|
|
24
|
+
timeout: 3600000, // 1 hour
|
|
25
|
+
description:
|
|
26
|
+
'Latest flagship model (400K context, 128K output) - Superior reasoning, code generation, analysis. Supports "none" reasoning for faster responses',
|
|
27
|
+
aliases: [
|
|
28
|
+
'gpt-5',
|
|
29
|
+
'gpt5',
|
|
30
|
+
'gpt 5',
|
|
31
|
+
'gpt-5.1-2025-11-13',
|
|
32
|
+
'gpt5.1',
|
|
33
|
+
'gpt 5.1',
|
|
34
|
+
],
|
|
35
|
+
},
|
|
36
|
+
'gpt-5-2025-08-07': {
|
|
37
|
+
modelName: 'gpt-5-2025-08-07',
|
|
38
|
+
friendlyName: 'OpenAI (GPT-5.0)',
|
|
39
|
+
contextWindow: 400000,
|
|
40
|
+
maxOutputTokens: 128000,
|
|
41
|
+
supportsStreaming: true,
|
|
42
|
+
supportsImages: true,
|
|
43
|
+
supportsTemperature: false, // GPT-5 doesn't support temperature
|
|
44
|
+
supportsWebSearch: true,
|
|
45
|
+
supportsResponsesAPI: true,
|
|
46
|
+
supportsNoneReasoningEffort: false, // GPT-5.0 does not support "none" reasoning
|
|
47
|
+
timeout: 3600000, // 1 hour
|
|
48
|
+
description:
|
|
49
|
+
'GPT-5.0 model (400K context, 128K output) - Previous version, accessible via fully qualified name',
|
|
50
|
+
aliases: ['gpt-5.0', 'gpt5.0', 'gpt 5.0'],
|
|
51
|
+
},
|
|
52
|
+
'gpt-5-mini': {
|
|
53
|
+
modelName: 'gpt-5-mini',
|
|
54
|
+
friendlyName: 'OpenAI (GPT-5-mini)',
|
|
55
|
+
contextWindow: 400000,
|
|
56
|
+
maxOutputTokens: 128000,
|
|
57
|
+
supportsStreaming: true,
|
|
58
|
+
supportsImages: true,
|
|
59
|
+
supportsTemperature: false, // GPT-5 models don't support temperature
|
|
60
|
+
supportsWebSearch: true,
|
|
61
|
+
supportsResponsesAPI: true,
|
|
62
|
+
timeout: 1800000, // 30 minutes
|
|
63
|
+
description:
|
|
64
|
+
'Faster, cost-efficient GPT-5 (400K context, 128K output) - Well-defined tasks, precise prompts',
|
|
65
|
+
aliases: ['gpt5-mini', 'gpt-5mini', 'gpt 5 mini', 'gpt-5-mini-2025-08-07'],
|
|
66
|
+
},
|
|
67
|
+
'gpt-5-nano': {
|
|
68
|
+
modelName: 'gpt-5-nano',
|
|
69
|
+
friendlyName: 'OpenAI (GPT-5-nano)',
|
|
70
|
+
contextWindow: 400000,
|
|
71
|
+
maxOutputTokens: 128000,
|
|
72
|
+
supportsStreaming: true,
|
|
73
|
+
supportsImages: true,
|
|
74
|
+
supportsTemperature: false, // GPT-5 models don't support temperature
|
|
75
|
+
supportsWebSearch: false, // GPT-5-nano doesn't support web search
|
|
76
|
+
supportsResponsesAPI: true,
|
|
77
|
+
timeout: 600000, // 10 minutes
|
|
78
|
+
description:
|
|
79
|
+
'Fastest, most cost-efficient GPT-5 (400K context, 128K output) - Summarization, classification',
|
|
80
|
+
aliases: ['gpt5-nano', 'gpt-5nano', 'gpt 5 nano', 'gpt-5-nano-2025-08-07'],
|
|
81
|
+
},
|
|
82
|
+
'gpt-5-pro': {
|
|
83
|
+
modelName: 'gpt-5-pro',
|
|
84
|
+
friendlyName: 'OpenAI (GPT-5 Pro)',
|
|
85
|
+
contextWindow: 400000,
|
|
86
|
+
maxOutputTokens: 272000,
|
|
87
|
+
supportsStreaming: false, // GPT-5 Pro doesn't support streaming
|
|
88
|
+
supportsImages: true,
|
|
89
|
+
supportsTemperature: false, // GPT-5 models don't support temperature
|
|
90
|
+
supportsWebSearch: true,
|
|
91
|
+
supportsResponsesAPI: true,
|
|
92
|
+
supportsDeepResearch: false, // Not a deep research model
|
|
93
|
+
timeout: 3600000, // 60 minutes - some requests may take several minutes
|
|
94
|
+
description:
|
|
95
|
+
'Most advanced reasoning model (400K context, 272K output) - Hardest problems, extended compute time (EXPENSIVE)',
|
|
96
|
+
aliases: [
|
|
97
|
+
'gpt5-pro',
|
|
98
|
+
'gpt-5pro',
|
|
99
|
+
'gpt 5 pro',
|
|
100
|
+
'gpt-5 pro',
|
|
101
|
+
'gpt-5-pro-2025-10-06',
|
|
102
|
+
],
|
|
103
|
+
},
|
|
104
|
+
o3: {
|
|
105
|
+
modelName: 'o3',
|
|
106
|
+
friendlyName: 'OpenAI (O3)',
|
|
107
|
+
contextWindow: 200000,
|
|
108
|
+
maxOutputTokens: 100000,
|
|
109
|
+
supportsStreaming: true,
|
|
110
|
+
supportsImages: true,
|
|
111
|
+
supportsTemperature: false,
|
|
112
|
+
supportsWebSearch: true,
|
|
113
|
+
supportsResponsesAPI: true,
|
|
114
|
+
timeout: 600000, // 10 minutes
|
|
115
|
+
description:
|
|
116
|
+
'Strong reasoning (200K context) - Logical problems, code generation, systematic analysis',
|
|
117
|
+
aliases: ['o3-2025-01-31'],
|
|
118
|
+
},
|
|
119
|
+
'o3-mini': {
|
|
120
|
+
modelName: 'o3-mini',
|
|
121
|
+
friendlyName: 'OpenAI (O3-mini)',
|
|
122
|
+
contextWindow: 200000,
|
|
123
|
+
maxOutputTokens: 100000,
|
|
124
|
+
supportsStreaming: true,
|
|
125
|
+
supportsImages: true,
|
|
126
|
+
supportsTemperature: false,
|
|
127
|
+
supportsWebSearch: false, // o3-mini does not support web search
|
|
128
|
+
supportsResponsesAPI: true,
|
|
129
|
+
timeout: 300000,
|
|
130
|
+
description:
|
|
131
|
+
'Fast O3 variant (200K context) - Balanced performance/speed, moderate complexity',
|
|
132
|
+
aliases: ['o3mini', 'o3 mini', 'o3-mini-2025-01-31'],
|
|
133
|
+
},
|
|
134
|
+
'o3-pro-2025-06-10': {
|
|
135
|
+
modelName: 'o3-pro-2025-06-10',
|
|
136
|
+
friendlyName: 'OpenAI (O3-Pro)',
|
|
137
|
+
contextWindow: 200000,
|
|
138
|
+
maxOutputTokens: 100000,
|
|
139
|
+
supportsStreaming: true,
|
|
140
|
+
supportsImages: true,
|
|
141
|
+
supportsTemperature: false,
|
|
142
|
+
supportsWebSearch: true,
|
|
143
|
+
supportsResponsesAPI: true,
|
|
144
|
+
timeout: 3600000, // 60 minutes
|
|
145
|
+
description:
|
|
146
|
+
'Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems',
|
|
147
|
+
aliases: ['o3-pro', 'o3pro', 'o3 pro'],
|
|
148
|
+
},
|
|
149
|
+
'o4-mini': {
|
|
150
|
+
modelName: 'o4-mini',
|
|
151
|
+
friendlyName: 'OpenAI (O4-mini)',
|
|
152
|
+
contextWindow: 200000,
|
|
153
|
+
maxOutputTokens: 100000,
|
|
154
|
+
supportsStreaming: true,
|
|
155
|
+
supportsImages: true,
|
|
156
|
+
supportsTemperature: false,
|
|
157
|
+
supportsWebSearch: true,
|
|
158
|
+
supportsResponsesAPI: true,
|
|
159
|
+
timeout: 180000, // 3 minutes
|
|
160
|
+
description:
|
|
161
|
+
'Latest reasoning model (200K context) - Optimized for shorter contexts, rapid reasoning',
|
|
162
|
+
aliases: ['o4mini', 'o4', 'o4 mini', 'o4-mini-2025-01-30'],
|
|
163
|
+
},
|
|
164
|
+
'gpt-4.1-2025-04-14': {
|
|
165
|
+
modelName: 'gpt-4.1-2025-04-14',
|
|
166
|
+
friendlyName: 'OpenAI (GPT-4.1)',
|
|
167
|
+
contextWindow: 1000000,
|
|
168
|
+
maxOutputTokens: 32768,
|
|
169
|
+
supportsStreaming: true,
|
|
170
|
+
supportsImages: true,
|
|
171
|
+
supportsTemperature: true,
|
|
172
|
+
supportsWebSearch: true,
|
|
173
|
+
supportsResponsesAPI: true,
|
|
174
|
+
timeout: 300000,
|
|
175
|
+
description:
|
|
176
|
+
'GPT-4.1 (1M context) - Advanced reasoning model with large context window',
|
|
177
|
+
aliases: ['gpt4.1', 'gpt-4.1', 'gpt 4.1', 'gpt-4.1-latest'],
|
|
178
|
+
},
|
|
179
|
+
'gpt-4o': {
|
|
180
|
+
modelName: 'gpt-4o',
|
|
181
|
+
friendlyName: 'OpenAI (GPT-4o)',
|
|
182
|
+
contextWindow: 128000,
|
|
183
|
+
maxOutputTokens: 16384,
|
|
184
|
+
supportsStreaming: true,
|
|
185
|
+
supportsImages: true,
|
|
186
|
+
supportsTemperature: true,
|
|
187
|
+
supportsWebSearch: true,
|
|
188
|
+
supportsResponsesAPI: true,
|
|
189
|
+
timeout: 180000,
|
|
190
|
+
description:
|
|
191
|
+
'GPT-4o (128K context) - Multimodal flagship model with vision capabilities',
|
|
192
|
+
aliases: ['gpt4o', 'gpt 4o', '4o'],
|
|
193
|
+
},
|
|
194
|
+
'gpt-4o-mini': {
|
|
195
|
+
modelName: 'gpt-4o-mini',
|
|
196
|
+
friendlyName: 'OpenAI (GPT-4o-mini)',
|
|
197
|
+
contextWindow: 128000,
|
|
198
|
+
maxOutputTokens: 16384,
|
|
199
|
+
supportsStreaming: true,
|
|
200
|
+
supportsImages: true,
|
|
201
|
+
supportsTemperature: true,
|
|
202
|
+
supportsWebSearch: true,
|
|
203
|
+
supportsResponsesAPI: true,
|
|
204
|
+
timeout: 120000,
|
|
205
|
+
description:
|
|
206
|
+
'GPT-4o-mini (128K context) - Fast and efficient multimodal model',
|
|
207
|
+
aliases: ['gpt4o-mini', 'gpt 4o mini', '4o mini', '4o-mini'],
|
|
208
|
+
},
|
|
209
|
+
'o3-deep-research-2025-06-26': {
|
|
210
|
+
modelName: 'o3-deep-research-2025-06-26',
|
|
211
|
+
friendlyName: 'OpenAI (O3 Deep Research)',
|
|
212
|
+
contextWindow: 200000,
|
|
213
|
+
maxOutputTokens: 100000,
|
|
214
|
+
supportsStreaming: true,
|
|
215
|
+
supportsImages: true,
|
|
216
|
+
supportsTemperature: false,
|
|
217
|
+
supportsWebSearch: true,
|
|
218
|
+
supportsResponsesAPI: true,
|
|
219
|
+
supportsDeepResearch: true,
|
|
220
|
+
timeout: 7200000, // 120 minutes for deep research
|
|
221
|
+
description:
|
|
222
|
+
'Deep research model (200K context) - In-depth synthesis, comprehensive reports, multi-source analysis (30-90 min runtime)',
|
|
223
|
+
aliases: [
|
|
224
|
+
'o3-deep-research',
|
|
225
|
+
'o3-research',
|
|
226
|
+
'o3 deep research',
|
|
227
|
+
'deep-research-o3',
|
|
228
|
+
],
|
|
229
|
+
},
|
|
230
|
+
'o4-mini-deep-research-2025-06-26': {
|
|
231
|
+
modelName: 'o4-mini-deep-research-2025-06-26',
|
|
232
|
+
friendlyName: 'OpenAI (O4-mini Deep Research)',
|
|
233
|
+
contextWindow: 200000,
|
|
234
|
+
maxOutputTokens: 100000,
|
|
235
|
+
supportsStreaming: true,
|
|
236
|
+
supportsImages: true,
|
|
237
|
+
supportsTemperature: false,
|
|
238
|
+
supportsWebSearch: true,
|
|
239
|
+
supportsResponsesAPI: true,
|
|
240
|
+
supportsDeepResearch: true,
|
|
241
|
+
timeout: 3600000, // 60 minutes for faster deep research
|
|
242
|
+
description:
|
|
243
|
+
'Fast deep research model (200K context) - Lightweight research, faster results, latency-sensitive analysis (15-60 min runtime)',
|
|
244
|
+
aliases: [
|
|
245
|
+
'o4-mini-deep-research',
|
|
246
|
+
'o4-mini-research',
|
|
247
|
+
'o4-research',
|
|
248
|
+
'o4 mini deep research',
|
|
249
|
+
'deep-research-o4-mini',
|
|
250
|
+
'o4-deep-research',
|
|
251
|
+
],
|
|
252
|
+
},
|
|
253
|
+
};
|
|
254
|
+
|
|
255
|
+
/**
|
|
256
|
+
* Custom error class for OpenAI provider errors
|
|
257
|
+
*/
|
|
258
|
+
class OpenAIProviderError extends Error {
|
|
259
|
+
constructor(message, code, originalError = null) {
|
|
260
|
+
super(message);
|
|
261
|
+
this.name = 'OpenAIProviderError';
|
|
262
|
+
this.code = code;
|
|
263
|
+
this.originalError = originalError;
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
/**
|
|
268
|
+
* Resolve model name to canonical form, including aliases
|
|
269
|
+
*/
|
|
270
|
+
function resolveModelName(modelName) {
|
|
271
|
+
const modelNameLower = modelName.toLowerCase();
|
|
272
|
+
|
|
273
|
+
// Check exact matches first
|
|
274
|
+
for (const [supportedModel] of Object.entries(SUPPORTED_MODELS)) {
|
|
275
|
+
if (supportedModel.toLowerCase() === modelNameLower) {
|
|
276
|
+
return supportedModel;
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
// Check aliases
|
|
281
|
+
for (const [supportedModel, config] of Object.entries(SUPPORTED_MODELS)) {
|
|
282
|
+
if (config.aliases) {
|
|
283
|
+
for (const alias of config.aliases) {
|
|
284
|
+
if (alias.toLowerCase() === modelNameLower) {
|
|
285
|
+
return supportedModel;
|
|
286
|
+
}
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
// Return as-is if not found (let OpenAI API handle unknown models)
|
|
292
|
+
return modelName;
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
/**
|
|
296
|
+
* Validate OpenAI API key format
|
|
297
|
+
*/
|
|
298
|
+
function validateApiKey(apiKey) {
|
|
299
|
+
if (!apiKey || typeof apiKey !== 'string') {
|
|
300
|
+
return false;
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
// OpenAI API keys typically start with 'sk-' and are at least 20 characters
|
|
304
|
+
return apiKey.startsWith('sk-') && apiKey.length >= 20;
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
/**
|
|
308
|
+
* Convert messages to OpenAI format, handling both Responses API and Chat Completions API
|
|
309
|
+
*/
|
|
310
|
+
function convertMessages(messages, useResponsesAPI = false) {
|
|
311
|
+
if (!Array.isArray(messages)) {
|
|
312
|
+
throw new OpenAIProviderError(
|
|
313
|
+
'Messages must be an array',
|
|
314
|
+
'INVALID_MESSAGES',
|
|
315
|
+
);
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
return messages.map((msg, index) => {
|
|
319
|
+
if (!msg || typeof msg !== 'object') {
|
|
320
|
+
throw new OpenAIProviderError(
|
|
321
|
+
`Message at index ${index} must be an object`,
|
|
322
|
+
'INVALID_MESSAGE',
|
|
323
|
+
);
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
const { role, content } = msg;
|
|
327
|
+
|
|
328
|
+
if (!role || !['system', 'user', 'assistant'].includes(role)) {
|
|
329
|
+
throw new OpenAIProviderError(
|
|
330
|
+
`Invalid role "${role}" at message index ${index}`,
|
|
331
|
+
'INVALID_ROLE',
|
|
332
|
+
);
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
if (!content) {
|
|
336
|
+
throw new OpenAIProviderError(
|
|
337
|
+
`Message content is required at index ${index}`,
|
|
338
|
+
'MISSING_CONTENT',
|
|
339
|
+
);
|
|
340
|
+
}
|
|
341
|
+
|
|
342
|
+
// Handle complex content structure (array with text and images)
|
|
343
|
+
if (Array.isArray(content)) {
|
|
344
|
+
debugLog(
|
|
345
|
+
`[OpenAI] Processing complex content array with ${content.length} items for ${useResponsesAPI ? 'Responses API' : 'Chat Completions API'}`,
|
|
346
|
+
);
|
|
347
|
+
if (useResponsesAPI) {
|
|
348
|
+
// Convert to Responses API format
|
|
349
|
+
const convertedContent = [];
|
|
350
|
+
|
|
351
|
+
for (const item of content) {
|
|
352
|
+
if (item.type === 'text') {
|
|
353
|
+
convertedContent.push({
|
|
354
|
+
type: 'input_text',
|
|
355
|
+
text: item.text,
|
|
356
|
+
});
|
|
357
|
+
} else if (item.type === 'image' && item.source) {
|
|
358
|
+
// Convert Anthropic/Claude format to OpenAI Responses API format
|
|
359
|
+
const imageUrl = `data:${item.source.media_type};base64,${item.source.data}`;
|
|
360
|
+
debugLog(
|
|
361
|
+
`[OpenAI] Converting image for Responses API: ${item.source.media_type}, data length: ${item.source.data.length}`,
|
|
362
|
+
);
|
|
363
|
+
convertedContent.push({
|
|
364
|
+
type: 'input_image',
|
|
365
|
+
image_url: imageUrl,
|
|
366
|
+
});
|
|
367
|
+
}
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
return { role, content: convertedContent };
|
|
371
|
+
} else {
|
|
372
|
+
// Convert to Chat Completions API format
|
|
373
|
+
const convertedContent = [];
|
|
374
|
+
|
|
375
|
+
for (const item of content) {
|
|
376
|
+
if (item.type === 'text') {
|
|
377
|
+
convertedContent.push({
|
|
378
|
+
type: 'text',
|
|
379
|
+
text: item.text,
|
|
380
|
+
});
|
|
381
|
+
} else if (item.type === 'image' && item.source) {
|
|
382
|
+
// Convert Anthropic/Claude format to OpenAI Chat Completions format
|
|
383
|
+
const imageUrl = `data:${item.source.media_type};base64,${item.source.data}`;
|
|
384
|
+
debugLog(
|
|
385
|
+
`[OpenAI] Converting image for Chat Completions API: ${item.source.media_type}, data length: ${item.source.data.length}`,
|
|
386
|
+
);
|
|
387
|
+
convertedContent.push({
|
|
388
|
+
type: 'image_url',
|
|
389
|
+
image_url: {
|
|
390
|
+
url: imageUrl,
|
|
391
|
+
detail: 'high',
|
|
392
|
+
},
|
|
393
|
+
});
|
|
394
|
+
}
|
|
395
|
+
}
|
|
396
|
+
|
|
397
|
+
return { role, content: convertedContent };
|
|
398
|
+
}
|
|
399
|
+
}
|
|
400
|
+
|
|
401
|
+
// Simple string content
|
|
402
|
+
return { role, content };
|
|
403
|
+
});
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
/**
|
|
407
|
+
* Main OpenAI provider implementation
|
|
408
|
+
*/
|
|
409
|
+
export const openaiProvider = {
|
|
410
|
+
/**
|
|
411
|
+
* Unified provider interface: invoke messages with options
|
|
412
|
+
* @param {Array} messages - Array of message objects with role and content
|
|
413
|
+
* @param {Object} options - Configuration options
|
|
414
|
+
* @returns {Object|AsyncGenerator} - { content, stop_reason, rawResponse } or AsyncGenerator when stream=true
|
|
415
|
+
*/
|
|
416
|
+
  async invoke(messages, options = {}) {
    // Pull out provider-level options; anything not listed here is forwarded
    // verbatim into the request payload via `...otherOptions`.
    const {
      model = 'gpt-4o-mini',
      temperature = 0.7,
      maxTokens = null,
      stream = false,
      reasoning_effort = 'medium',
      verbosity = 'medium',
      use_websearch = false,
      signal,
      config,
      ...otherOptions
    } = options;

    // Validate API key presence before doing any work.
    if (!config?.apiKeys?.openai) {
      throw new OpenAIProviderError(
        'OpenAI API key not configured',
        'MISSING_API_KEY',
      );
    }

    // Validate API key format (shape only; not an authentication check).
    if (!validateApiKey(config.apiKeys.openai)) {
      throw new OpenAIProviderError(
        'Invalid OpenAI API key format',
        'INVALID_API_KEY',
      );
    }

    // Initialize OpenAI SDK client for this call.
    const openai = new OpenAI({
      apiKey: config.apiKeys.openai,
    });

    // Resolve aliases (e.g. shorthand names) to a canonical model id, then
    // look up its capability flags; unknown models get an empty config so
    // all `!== false` capability checks below default to "supported".
    const resolvedModel = resolveModelName(model);
    const modelConfig = SUPPORTED_MODELS[resolvedModel] || {};

    // Always use Responses API since all OpenAI models support it.
    // Only fall back to Chat Completions API if Responses API is explicitly
    // marked unsupported for this model.
    const shouldUseResponsesAPI = modelConfig.supportsResponsesAPI !== false;

    // Convert and validate messages into the shape the chosen API expects.
    const openaiMessages = convertMessages(messages, shouldUseResponsesAPI);

    // Build request payload based on API type.
    let requestPayload;

    if (shouldUseResponsesAPI) {
      // Build Responses API payload (uses `input` instead of `messages`).
      requestPayload = {
        model: resolvedModel,
        input: openaiMessages,
        stream,
        ...otherOptions,
      };

      // Add web search tools only if requested and the model supports it.
      if (use_websearch && modelConfig.supportsWebSearch) {
        // Use web_search_preview tool for all models in Responses API.
        requestPayload.tools = [{ type: 'web_search_preview' }];
      }

      // Add temperature if the model supports it, clamped to OpenAI's [0, 2].
      if (
        modelConfig.supportsTemperature !== false &&
        temperature !== undefined
      ) {
        requestPayload.temperature = Math.max(0, Math.min(2, temperature));
      }

      // Add reasoning effort for thinking models (o3 series and GPT-5 family).
      if (
        (resolvedModel.startsWith('o3') || resolvedModel.startsWith('gpt-5')) &&
        reasoning_effort
      ) {
        // GPT-5 Pro only supports 'high' reasoning effort.
        const effectiveEffort =
          resolvedModel === 'gpt-5-pro' ? 'high' : reasoning_effort;
        requestPayload.reasoning = {
          effort: effectiveEffort,
          summary: 'auto', // Enable reasoning summaries
        };
      }

      // Add verbosity for GPT-5 models (Responses API nests it under `text`).
      if (resolvedModel.startsWith('gpt-5') && verbosity) {
        requestPayload.text = { verbosity };
      }
    } else {
      // Build Chat Completions API payload.
      // Strip Responses-API-only keys in case a caller passed them through
      // otherOptions (the top-level destructure only catches direct options).
      const {
        reasoning_effort: _unused,
        verbosity: _unused2,
        ...cleanOptions
      } = otherOptions;
      requestPayload = {
        model: resolvedModel,
        messages: openaiMessages,
        stream,
        ...cleanOptions,
      };

      // Add temperature if the model supports it, clamped to OpenAI's [0, 2].
      if (
        modelConfig.supportsTemperature !== false &&
        temperature !== undefined
      ) {
        requestPayload.temperature = Math.max(0, Math.min(2, temperature));
      }

      // Add reasoning effort for thinking models (o3 series and GPT-5 family).
      // Chat Completions takes a flat `reasoning_effort` field.
      if (
        (resolvedModel.startsWith('o3') || resolvedModel.startsWith('gpt-5')) &&
        reasoning_effort
      ) {
        // GPT-5 Pro only supports 'high' reasoning effort.
        const effectiveEffort =
          resolvedModel === 'gpt-5-pro' ? 'high' : reasoning_effort;
        requestPayload.reasoning_effort = effectiveEffort;
      }

      // Add verbosity for GPT-5 models (flat field in Chat Completions).
      if (resolvedModel.startsWith('gpt-5') && verbosity) {
        requestPayload.verbosity = verbosity;
      }
    }

    // Add max tokens if specified (field name differs between the two APIs),
    // capped at the model's configured output limit (100k fallback).
    if (maxTokens) {
      if (shouldUseResponsesAPI) {
        requestPayload.max_output_tokens = Math.min(
          maxTokens,
          modelConfig.maxOutputTokens || 100000,
        );
      } else {
        requestPayload.max_tokens = Math.min(
          maxTokens,
          modelConfig.maxOutputTokens || 100000,
        );
      }
    }

    // Ask Chat Completions to include token usage in the final stream chunk.
    if (stream && !shouldUseResponsesAPI) {
      requestPayload.stream_options = { include_usage: true };
    }

    // If streaming is requested and the model doesn't support it, fall back
    // to non-streaming (a normal response object is returned instead).
    if (stream && modelConfig.supportsStreaming === false) {
      debugLog(
        `[OpenAI] Model ${resolvedModel} doesn't support streaming, falling back to non-streaming mode`,
      );
      requestPayload.stream = false;
    }

    // Handle streaming requests: hand off to the async generator and return
    // it directly (callers must iterate it; no awaiting happens here).
    if (stream && requestPayload.stream !== false) {
      return this._createStreamingGenerator(
        openai,
        requestPayload,
        shouldUseResponsesAPI,
        resolvedModel,
        modelConfig,
        use_websearch,
        signal,
      );
    }

    try {
      const apiType = shouldUseResponsesAPI
        ? 'Responses API'
        : 'Chat Completions API';
      const searchInfo =
        use_websearch && modelConfig.supportsWebSearch
          ? ' (with web search)'
          : '';
      debugLog(
        `[OpenAI] Calling ${resolvedModel} via ${apiType} with ${openaiMessages.length} messages${searchInfo}`,
      );

      const startTime = Date.now();

      // Check if already aborted before making the request.
      if (signal?.aborted) {
        throw new Error(`Request aborted: ${signal.reason || 'Cancelled'}`);
      }

      // Make the API call based on API type.
      let response;
      if (shouldUseResponsesAPI) {
        // The signal is used internally by the OpenAI SDK for cancellation.
        // Don't pass it as a parameter to the API.
        response = await openai.responses.create(requestPayload);
      } else {
        // The signal is used internally by the OpenAI SDK for cancellation.
        // Don't pass it as a parameter to the API.
        response = await openai.chat.completions.create(requestPayload);
      }

      const responseTime = Date.now() - startTime;
      debugLog(`[OpenAI] Response received in ${responseTime}ms`);

      // Extract response data based on API type.
      let content, stopReason, usage;

      if (shouldUseResponsesAPI) {
        // Handle Responses API response format.
        let reasoningSummary = null;

        if (response.output) {
          // New format with output array (includes reasoning summaries):
          // find the assistant message item and any reasoning item.
          const messageOutput = response.output.find(
            (item) => item.type === 'message',
          );
          const reasoningOutput = response.output.find(
            (item) => item.type === 'reasoning',
          );

          if (!messageOutput || !messageOutput.content) {
            throw new OpenAIProviderError(
              'No message content in Responses API response',
              'NO_RESPONSE_CONTENT',
            );
          }

          // Extract the text part from the message output's content list.
          const textContent = messageOutput.content.find(
            (item) => item.type === 'output_text',
          );
          if (!textContent) {
            throw new OpenAIProviderError(
              'No text content in message output',
              'NO_RESPONSE_CONTENT',
            );
          }
          content = textContent.text;

          // Extract reasoning summary if the model produced one.
          if (reasoningOutput && reasoningOutput.summary) {
            const summaryText = reasoningOutput.summary.find(
              (item) => item.type === 'summary_text',
            );
            if (summaryText) {
              reasoningSummary = summaryText.text;
            }
          }
        } else if (response.output_text) {
          // Legacy format: flat convenience field.
          content = response.output_text;
        } else {
          throw new OpenAIProviderError(
            'No output in Responses API response',
            'NO_RESPONSE_CONTENT',
          );
        }

        // NOTE(review): Responses API reports `status` (e.g. 'completed'),
        // not a Chat-style finish_reason — downstream consumers see either.
        stopReason = response.status || 'stop';
        usage = response.usage || {};

        // Store the reasoning summary alongside usage metadata so it rides
        // the unified return shape without widening the interface.
        if (reasoningSummary) {
          usage.reasoning_summary = reasoningSummary;
          debugLog(
            `[OpenAI] Found reasoning summary: ${reasoningSummary.substring(0, 100)}...`,
          );
        } else {
          debugLog('[OpenAI] No reasoning summary found in response');
          debugLog(
            '[OpenAI] Response structure:',
            JSON.stringify(response, null, 2).substring(0, 500),
          );
        }
      } else {
        // Handle Chat Completions API response format.
        const choice = response.choices[0];
        if (!choice) {
          throw new OpenAIProviderError(
            'No response choice received from OpenAI',
            'NO_RESPONSE_CHOICE',
          );
        }

        content = choice.message?.content;
        if (!content) {
          throw new OpenAIProviderError(
            'No content in response from OpenAI',
            'NO_RESPONSE_CONTENT',
          );
        }
        stopReason = choice.finish_reason || 'stop';
        usage = response.usage || {};
      }

      // Determine web search usage for metadata reporting.
      const webSearchUsed = use_websearch && modelConfig.supportsWebSearch;
      const webSearchType = webSearchUsed ? 'web_search_preview' : null;

      // Return unified response format shared by all providers in this repo.
      // Usage fields fall back across both APIs' naming conventions
      // (prompt_tokens/input_tokens, completion_tokens/output_tokens).
      return {
        content,
        stop_reason: stopReason,
        rawResponse: response,
        metadata: {
          model: response.model || resolvedModel,
          usage: {
            input_tokens: usage.prompt_tokens || usage.input_tokens || 0,
            output_tokens: usage.completion_tokens || usage.output_tokens || 0,
            total_tokens: usage.total_tokens || 0,
          },
          response_time_ms: responseTime,
          finish_reason: stopReason,
          provider: 'openai',
          api_type: apiType,
          web_search_used: webSearchUsed,
          web_search_type: webSearchType,
        },
      };
    } catch (error) {
      debugError('[OpenAI] Error during API call:', error);

      // Map well-known OpenAI error codes/types to typed provider errors,
      // preserving the original error as the third argument for diagnostics.
      if (error.code === 'insufficient_quota') {
        throw new OpenAIProviderError(
          'OpenAI API quota exceeded',
          'QUOTA_EXCEEDED',
          error,
        );
      } else if (error.code === 'invalid_api_key') {
        throw new OpenAIProviderError(
          'Invalid OpenAI API key',
          'INVALID_API_KEY',
          error,
        );
      } else if (error.code === 'model_not_found') {
        throw new OpenAIProviderError(
          `Model ${resolvedModel} not found`,
          'MODEL_NOT_FOUND',
          error,
        );
      } else if (error.code === 'context_length_exceeded') {
        throw new OpenAIProviderError(
          'Context length exceeded for model',
          'CONTEXT_LENGTH_EXCEEDED',
          error,
        );
      } else if (error.type === 'invalid_request_error') {
        throw new OpenAIProviderError(
          `Invalid request: ${error.message}`,
          'INVALID_REQUEST',
          error,
        );
      } else if (error.type === 'rate_limit_error') {
        throw new OpenAIProviderError(
          'OpenAI rate limit exceeded',
          'RATE_LIMIT_EXCEEDED',
          error,
        );
      }

      // Generic error handling: anything unrecognized becomes API_ERROR.
      throw new OpenAIProviderError(
        `OpenAI API error: ${error.message || 'Unknown error'}`,
        'API_ERROR',
        error,
      );
    }
  },
|
|
784
|
+
|
|
785
|
+
  /**
   * Create streaming generator for OpenAI responses.
   *
   * Yields a sequence of event objects: 'start', zero or more 'delta' /
   * 'reasoning_summary' events, optionally 'usage', then 'end' with final
   * metadata. Recoverable chunk errors are yielded as 'error' events and the
   * stream continues; fatal errors yield a terminal 'error' event and then
   * throw an OpenAIProviderError.
   *
   * @private
   * @param {OpenAI} openai - OpenAI client instance
   * @param {Object} requestPayload - Request payload
   * @param {boolean} shouldUseResponsesAPI - Whether to use Responses API
   * @param {string} resolvedModel - Resolved model name
   * @param {Object} modelConfig - Model configuration
   * @param {boolean} use_websearch - Whether web search is enabled
   * @param {AbortSignal} [signal] - Optional abort signal checked before the
   *   request and between chunks; aborting mid-stream ends iteration early
   * @returns {AsyncGenerator} - Streaming generator yielding events
   */
  async *_createStreamingGenerator(
    openai,
    requestPayload,
    shouldUseResponsesAPI,
    resolvedModel,
    modelConfig,
    use_websearch,
    signal,
  ) {
    const apiType = shouldUseResponsesAPI
      ? 'Responses API'
      : 'Chat Completions API';
    const searchInfo =
      use_websearch && modelConfig.supportsWebSearch
        ? ' (with web search)'
        : '';

    // Responses API payloads carry `input`; Chat Completions carry `messages`.
    debugLog(
      `[OpenAI] Starting streaming for ${resolvedModel} via ${apiType} with ${requestPayload.input?.length || requestPayload.messages?.length} messages${searchInfo}`,
    );

    const startTime = Date.now();
    // Accumulators assembled across chunks; reported in the final 'end' event.
    let totalContent = '';
    let totalReasoningSummary = '';
    let lastUsage = null;
    let finishReason = null;
    let finalModel = resolvedModel;

    try {
      // Check if already aborted before starting.
      if (signal?.aborted) {
        throw new Error(`Request aborted: ${signal.reason || 'Cancelled'}`);
      }

      // Yield start event so consumers can surface model/API info early.
      yield {
        type: 'start',
        timestamp: new Date().toISOString(),
        model: resolvedModel,
        provider: 'openai',
        api_type: apiType,
      };

      // Create stream based on API type.
      let stream;
      if (shouldUseResponsesAPI) {
        // The signal is used internally by the OpenAI SDK for cancellation.
        // Don't pass it as a parameter to the API.
        stream = await openai.responses.create(requestPayload);
      } else {
        // The signal is used internally by the OpenAI SDK for cancellation.
        // Don't pass it as a parameter to the API.
        stream = await openai.chat.completions.create(requestPayload);
      }

      // Process stream chunks.
      for await (const chunk of stream) {
        try {
          // Check for cancellation during stream processing; a mid-stream
          // abort exits the loop so 'usage'/'end' are still emitted below.
          if (signal?.aborted) {
            debugLog(
              `[OpenAI] Stream aborted during processing: ${signal.reason || 'Cancelled'}`,
            );
            break;
          }
          if (shouldUseResponsesAPI) {
            // Handle Responses API streaming format (typed event chunks).
            if (chunk.type === 'response.output_text.delta') {
              const content = chunk.delta || '';
              if (content) {
                totalContent += content;
                yield {
                  type: 'delta',
                  content,
                  timestamp: new Date().toISOString(),
                };
              }
            } else if (chunk.type === 'response.reasoning_summary_part.added') {
              // Event 1: reasoning summary part added (usually empty initially).
              debugLog('[OpenAI] *** REASONING PART ADDED');
            } else if (chunk.type === 'response.reasoning_summary_part.done') {
              // Event 2: reasoning summary part completed with full text —
              // replaces (not appends to) the accumulated summary.
              const summaryText = chunk.part?.text || '';
              if (summaryText) {
                totalReasoningSummary = summaryText;
                debugLog(
                  `[OpenAI] *** REASONING PART DONE: "${summaryText.substring(0, 100)}..."`,
                );

                yield {
                  type: 'reasoning_summary',
                  content: totalReasoningSummary,
                  timestamp: new Date().toISOString(),
                };
              }
            } else if (chunk.type === 'response.reasoning_summary_text.delta') {
              // Event 3: reasoning summary text delta (streaming pieces);
              // each yield carries the cumulative summary so far.
              const summaryDelta = chunk.delta || '';
              if (summaryDelta) {
                totalReasoningSummary += summaryDelta;
                debugLog(
                  `[OpenAI] *** REASONING TEXT DELTA: "${summaryDelta}"`,
                );

                yield {
                  type: 'reasoning_summary',
                  content: totalReasoningSummary,
                  timestamp: new Date().toISOString(),
                };
              }
            } else if (chunk.type === 'response.reasoning_summary_text.done') {
              // Event 4: reasoning summary text completed with full text;
              // falls back to the accumulated deltas if `text` is absent.
              const fullSummary = chunk.text || totalReasoningSummary;
              if (fullSummary) {
                totalReasoningSummary = fullSummary;
                debugLog(
                  `[OpenAI] *** REASONING TEXT DONE: "${fullSummary.substring(0, 100)}..."`,
                );

                yield {
                  type: 'reasoning_summary',
                  content: fullSummary,
                  timestamp: new Date().toISOString(),
                };
              }
            } else if (chunk.type === 'response.completed') {
              // Terminal Responses event: capture status, model, and usage.
              finishReason = chunk.response?.status || 'stop';
              finalModel = chunk.response?.model || resolvedModel;
              if (chunk.response?.usage) {
                lastUsage = chunk.response.usage;
              }
            }
          } else {
            // Handle Chat Completions API streaming format (choice deltas).
            const choice = chunk.choices?.[0];
            if (choice) {
              const content = choice.delta?.content || '';
              if (content) {
                totalContent += content;
                yield {
                  type: 'delta',
                  content,
                  timestamp: new Date().toISOString(),
                };
              }

              if (choice.finish_reason) {
                finishReason = choice.finish_reason;
              }
            }

            // Handle usage information (typically in the final chunk, present
            // because the request set stream_options.include_usage).
            if (chunk.usage) {
              lastUsage = chunk.usage;
            }

            // Update model if provided.
            if (chunk.model) {
              finalModel = chunk.model;
            }
          }
        } catch (chunkError) {
          // Per-chunk failures are non-fatal: report and keep consuming.
          debugError('[OpenAI] Error processing stream chunk:', chunkError);
          yield {
            type: 'error',
            error: {
              message: `Chunk processing error: ${chunkError.message}`,
              code: 'CHUNK_PROCESSING_ERROR',
              recoverable: true,
            },
            timestamp: new Date().toISOString(),
          };
        }
      }

      const responseTime = Date.now() - startTime;
      debugLog(`[OpenAI] Streaming completed in ${responseTime}ms`);

      // Yield usage information if available (normalized across both APIs'
      // field names: prompt_tokens/input_tokens, completion_tokens/output_tokens).
      if (lastUsage) {
        yield {
          type: 'usage',
          usage: {
            input_tokens:
              lastUsage.prompt_tokens || lastUsage.input_tokens || 0,
            output_tokens:
              lastUsage.completion_tokens || lastUsage.output_tokens || 0,
            total_tokens: lastUsage.total_tokens || 0,
          },
          timestamp: new Date().toISOString(),
        };
      }

      // Determine web search usage for final metadata.
      const webSearchUsed = use_websearch && modelConfig.supportsWebSearch;
      const webSearchType = webSearchUsed ? 'web_search_preview' : null;

      // Yield end event with the full assembled content and final metadata
      // (same unified shape as the non-streaming invoke() return value).
      yield {
        type: 'end',
        content: totalContent,
        stop_reason: finishReason || 'stop',
        metadata: {
          model: finalModel,
          usage: {
            input_tokens:
              lastUsage?.prompt_tokens || lastUsage?.input_tokens || 0,
            output_tokens:
              lastUsage?.completion_tokens || lastUsage?.output_tokens || 0,
            total_tokens: lastUsage?.total_tokens || 0,
          },
          response_time_ms: responseTime,
          finish_reason: finishReason || 'stop',
          provider: 'openai',
          api_type: apiType,
          web_search_used: webSearchUsed,
          web_search_type: webSearchType,
          reasoning_summary: totalReasoningSummary || null,
        },
        timestamp: new Date().toISOString(),
      };
    } catch (error) {
      debugError('[OpenAI] Streaming error:', error);

      // Handle specific OpenAI errors in streaming context: map known
      // codes/types onto a provider error code and a friendlier message.
      let errorCode = 'STREAMING_ERROR';
      let errorMessage = `OpenAI streaming error: ${error.message || 'Unknown error'}`;
      let recoverable = false;

      if (error.code === 'insufficient_quota') {
        errorCode = 'QUOTA_EXCEEDED';
        errorMessage = 'OpenAI API quota exceeded';
      } else if (error.code === 'invalid_api_key') {
        errorCode = 'INVALID_API_KEY';
        errorMessage = 'Invalid OpenAI API key';
      } else if (error.code === 'model_not_found') {
        errorCode = 'MODEL_NOT_FOUND';
        errorMessage = `Model ${resolvedModel} not found`;
      } else if (error.code === 'context_length_exceeded') {
        errorCode = 'CONTEXT_LENGTH_EXCEEDED';
        errorMessage = 'Context length exceeded for model';
      } else if (error.type === 'rate_limit_error') {
        errorCode = 'RATE_LIMIT_EXCEEDED';
        errorMessage = 'OpenAI rate limit exceeded';
        recoverable = true;
      }

      // Surface the failure to stream consumers before throwing.
      yield {
        type: 'error',
        error: {
          message: errorMessage,
          code: errorCode,
          recoverable,
          originalError: error,
        },
        timestamp: new Date().toISOString(),
      };

      // Re-throw the error to maintain existing error handling behavior.
      throw new OpenAIProviderError(errorMessage, errorCode, error);
    }
  },
|
|
1058
|
+
|
|
1059
|
+
/**
|
|
1060
|
+
* Validate configuration for OpenAI provider
|
|
1061
|
+
* @param {Object} config - Configuration object
|
|
1062
|
+
* @returns {boolean} - True if configuration is valid
|
|
1063
|
+
*/
|
|
1064
|
+
validateConfig(config) {
|
|
1065
|
+
return !!(config?.apiKeys?.openai && validateApiKey(config.apiKeys.openai));
|
|
1066
|
+
},
|
|
1067
|
+
|
|
1068
|
+
/**
|
|
1069
|
+
* Check if provider is available with current configuration
|
|
1070
|
+
* @param {Object} config - Configuration object
|
|
1071
|
+
* @returns {boolean} - True if provider is available
|
|
1072
|
+
*/
|
|
1073
|
+
isAvailable(config) {
|
|
1074
|
+
return this.validateConfig(config);
|
|
1075
|
+
},
|
|
1076
|
+
|
|
1077
|
+
/**
|
|
1078
|
+
* Get supported models
|
|
1079
|
+
* @returns {Object} - Map of supported models and their configurations
|
|
1080
|
+
*/
|
|
1081
|
+
getSupportedModels() {
|
|
1082
|
+
return SUPPORTED_MODELS;
|
|
1083
|
+
},
|
|
1084
|
+
|
|
1085
|
+
/**
|
|
1086
|
+
* Get model configuration
|
|
1087
|
+
* @param {string} modelName - Model name
|
|
1088
|
+
* @returns {Object|null} - Model configuration or null if not found
|
|
1089
|
+
*/
|
|
1090
|
+
getModelConfig(modelName) {
|
|
1091
|
+
const resolved = resolveModelName(modelName);
|
|
1092
|
+
return SUPPORTED_MODELS[resolved] || null;
|
|
1093
|
+
},
|
|
1094
|
+
};
|