json-object-editor 0.10.653 → 0.10.657
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +47 -1
- package/_www/mcp-test.html +287 -276
- package/css/joe-ai.css +1 -1
- package/css/joe-styles.css +56 -1
- package/css/joe.css +57 -2
- package/css/joe.min.css +1 -1
- package/js/JsonObjectEditor.jquery.craydent.js +152 -17
- package/js/joe-ai.js +2075 -1825
- package/js/joe.js +153 -18
- package/js/joe.min.js +1 -1
- package/package.json +1 -1
- package/readme.md +74 -4
- package/server/fields/core.js +54 -6
- package/server/modules/MCP.js +1364 -1237
- package/server/modules/Sites.js +79 -0
- package/server/modules/Storage.js +28 -1
- package/server/modules/ThoughtPipeline.js +6 -0
- package/server/plugins/awsConnect.js +31 -1
- package/server/plugins/chatgpt.js +1732 -1339
- package/server/schemas/ai_prompt.js +389 -322
- package/server/schemas/ai_response.js +414 -365
- package/server/schemas/status.js +12 -2
- package/server/schemas/task.js +9 -3
|
@@ -1,1339 +1,1732 @@
|
|
|
1
|
-
const OpenAI = require("openai");
|
|
2
|
-
const { google } = require('googleapis');
|
|
3
|
-
const path = require('path');
|
|
4
|
-
const
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
const
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
//get
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
//++
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
//
|
|
36
|
-
//store
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
*
|
|
63
|
-
*
|
|
64
|
-
*
|
|
65
|
-
*
|
|
66
|
-
*
|
|
67
|
-
*
|
|
68
|
-
*
|
|
69
|
-
*
|
|
70
|
-
*
|
|
71
|
-
* - `
|
|
72
|
-
*
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
//
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
*
|
|
111
|
-
*
|
|
112
|
-
*
|
|
113
|
-
*
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
response.output
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
if (item
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
if (
|
|
151
|
-
const
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
lower.includes('
|
|
157
|
-
lower.includes('
|
|
158
|
-
lower.includes('tokens
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
//
|
|
178
|
-
// -
|
|
179
|
-
// -
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
}
|
|
208
|
-
|
|
209
|
-
}
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
//
|
|
214
|
-
//
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
}
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
}
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
*
|
|
243
|
-
*
|
|
244
|
-
*
|
|
245
|
-
*
|
|
246
|
-
*
|
|
247
|
-
*
|
|
248
|
-
*
|
|
249
|
-
*
|
|
250
|
-
* -
|
|
251
|
-
* -
|
|
252
|
-
* -
|
|
253
|
-
* -
|
|
254
|
-
*
|
|
255
|
-
*
|
|
256
|
-
*
|
|
257
|
-
*
|
|
258
|
-
*
|
|
259
|
-
*
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
const
|
|
265
|
-
const
|
|
266
|
-
const
|
|
267
|
-
const
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
if (
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
}
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
}
|
|
476
|
-
}
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
}
|
|
491
|
-
}
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
}
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
});
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
}
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
};
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
}
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
}
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
if (
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
}
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
var
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1119
|
-
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
}
|
|
1138
|
-
}
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
}
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1
|
+
const OpenAI = require("openai");
|
|
2
|
+
const { google } = require('googleapis');
|
|
3
|
+
const path = require('path');
|
|
4
|
+
const os = require('os');
|
|
5
|
+
const fs = require('fs');
|
|
6
|
+
const MCP = require("../modules/MCP.js");
|
|
7
|
+
// const { name } = require("json-object-editor/server/webconfig");
|
|
8
|
+
|
|
9
|
+
function ChatGPT() {
|
|
10
|
+
// const fetch = (await import('node-fetch')).default;
|
|
11
|
+
//const openai = new OpenAI();
|
|
12
|
+
// Load the service account key JSON file
|
|
13
|
+
const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
|
|
14
|
+
const google_auth = new google.auth.GoogleAuth({
|
|
15
|
+
keyFile: serviceAccountKeyFile,
|
|
16
|
+
scopes: ['https://www.googleapis.com/auth/documents.readonly'],
|
|
17
|
+
});
|
|
18
|
+
|
|
19
|
+
var self = this;
|
|
20
|
+
this.async ={};
|
|
21
|
+
function coloredLog(message){
|
|
22
|
+
console.log(JOE.Utils.color('[chatgpt]', 'plugin', false), message);
|
|
23
|
+
}
|
|
24
|
+
//xx -setup and send a test prompt to chatgpt
|
|
25
|
+
//xx get the api key from joe settings
|
|
26
|
+
|
|
27
|
+
//get a prompt from id
|
|
28
|
+
//send the prompt to chatgpt
|
|
29
|
+
|
|
30
|
+
//++get the cotnent of a file
|
|
31
|
+
//++send the content of a file to chatgpt
|
|
32
|
+
|
|
33
|
+
//++ structure data
|
|
34
|
+
//++ save the response to an ai_repsonse
|
|
35
|
+
//create an ai_response
|
|
36
|
+
//store the content
|
|
37
|
+
//attach to the request
|
|
38
|
+
//store ids sent with the request
|
|
39
|
+
this.default = function(data, req, res) {
|
|
40
|
+
try {
|
|
41
|
+
var payload = {
|
|
42
|
+
params: req.params,
|
|
43
|
+
data: data
|
|
44
|
+
};
|
|
45
|
+
} catch (e) {
|
|
46
|
+
return { errors: 'plugin error: ' + e, failedat: 'plugin' };
|
|
47
|
+
}
|
|
48
|
+
return payload;
|
|
49
|
+
};
|
|
50
|
+
function getAPIKey() {
|
|
51
|
+
const setting = JOE.Utils.Settings('OPENAI_API_KEY');
|
|
52
|
+
if (!setting) throw new Error("Missing OPENAI_API_KEY setting");
|
|
53
|
+
return setting;
|
|
54
|
+
}
|
|
55
|
+
function getSchemaDef(name) {
|
|
56
|
+
if (!name) return { full: null, summary: null };
|
|
57
|
+
const full = JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[name];
|
|
58
|
+
const summary = JOE.Schemas && JOE.Schemas.summary && JOE.Schemas.summary[name];
|
|
59
|
+
return { full, summary };
|
|
60
|
+
}
|
|
61
|
+
/**
|
|
62
|
+
* callMCPTool
|
|
63
|
+
*
|
|
64
|
+
* Small, well‑scoped helper to invoke a JOE MCP tool directly in‑process,
|
|
65
|
+
* without going over HTTP or worrying about POST size limits.
|
|
66
|
+
*
|
|
67
|
+
* Usage:
|
|
68
|
+
* const result = await callMCPTool('listSchemas', {}, { req });
|
|
69
|
+
*
|
|
70
|
+
* Notes:
|
|
71
|
+
* - `toolName` must exist on MCP.tools.
|
|
72
|
+
* - `params` should be a plain JSON-serializable object.
|
|
73
|
+
* - `ctx` is optional and can pass `{ req }` or other context that MCP
|
|
74
|
+
* tools might want (for auth, user, etc.).
|
|
75
|
+
*/
|
|
76
|
+
async function callMCPTool(toolName, params = {}, ctx = {}) {
|
|
77
|
+
if (!MCP || !MCP.tools) {
|
|
78
|
+
throw new Error("MCP module not initialized; cannot call MCP tool");
|
|
79
|
+
}
|
|
80
|
+
if (!toolName || typeof toolName !== 'string') {
|
|
81
|
+
throw new Error("Missing or invalid MCP tool name");
|
|
82
|
+
}
|
|
83
|
+
const fn = MCP.tools[toolName];
|
|
84
|
+
if (typeof fn !== 'function') {
|
|
85
|
+
throw new Error(`MCP tool "${toolName}" not found`);
|
|
86
|
+
}
|
|
87
|
+
try {
|
|
88
|
+
// All MCP tools accept (params, ctx) and return a JSON-serializable result.
|
|
89
|
+
// The Responses / tools API often returns arguments as a JSON string, so
|
|
90
|
+
// normalize that here before invoking the tool.
|
|
91
|
+
let toolParams = params;
|
|
92
|
+
if (typeof toolParams === 'string') {
|
|
93
|
+
try {
|
|
94
|
+
toolParams = JSON.parse(toolParams);
|
|
95
|
+
} catch (parseErr) {
|
|
96
|
+
console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, toolParams);
|
|
97
|
+
// Fall back to passing the raw string so tools that expect it still work.
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
const result = await fn(toolParams || {}, ctx || {});
|
|
101
|
+
return result;
|
|
102
|
+
} catch (e) {
|
|
103
|
+
// Surface a clean error upstream but keep details in logs.
|
|
104
|
+
console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
|
|
105
|
+
throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/**
|
|
110
|
+
* extractToolCalls
|
|
111
|
+
*
|
|
112
|
+
* Best-effort parser for tool calls from a Responses API result.
|
|
113
|
+
* The Responses output shape may evolve; this function looks for
|
|
114
|
+
* any "tool_call" typed content in response.output[*].content[*]
|
|
115
|
+
* and normalizes it into `{ name, arguments }` objects.
|
|
116
|
+
*/
|
|
117
|
+
function extractToolCalls(response) {
|
|
118
|
+
var calls = [];
|
|
119
|
+
if (!response || !Array.isArray(response.output)) { return calls; }
|
|
120
|
+
|
|
121
|
+
response.output.forEach(function (item) {
|
|
122
|
+
if (!item) { return; }
|
|
123
|
+
// v1-style: item.type === 'tool_call'
|
|
124
|
+
if (item.type === 'function_call') {
|
|
125
|
+
calls.push({
|
|
126
|
+
name: item.name || item.function_name,
|
|
127
|
+
arguments: item.arguments || item.function_arguments || {}
|
|
128
|
+
});
|
|
129
|
+
}
|
|
130
|
+
// message-style: item.content is an array of parts
|
|
131
|
+
if (Array.isArray(item.content)) {
|
|
132
|
+
item.content.forEach(function (part) {
|
|
133
|
+
if (!part) { return; }
|
|
134
|
+
if (part.type === 'function_call') {
|
|
135
|
+
calls.push({
|
|
136
|
+
name: part.name || part.tool_name,
|
|
137
|
+
arguments: part.arguments || part.args || {}
|
|
138
|
+
});
|
|
139
|
+
}
|
|
140
|
+
});
|
|
141
|
+
}
|
|
142
|
+
});
|
|
143
|
+
|
|
144
|
+
return calls;
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
// Detect "request too large / token limit" style errors from the Responses API.
|
|
148
|
+
function isTokenLimitError(err) {
|
|
149
|
+
if (!err || typeof err !== 'object') return false;
|
|
150
|
+
if (err.status !== 429 && err.status !== 400) return false;
|
|
151
|
+
const msg = (err.error && err.error.message) || err.message || '';
|
|
152
|
+
if (!msg) return false;
|
|
153
|
+
const lower = String(msg).toLowerCase();
|
|
154
|
+
// Cover common phrasing from OpenAI for context/TPM limits.
|
|
155
|
+
return (
|
|
156
|
+
lower.includes('request too large') ||
|
|
157
|
+
lower.includes('too many tokens') ||
|
|
158
|
+
lower.includes('max tokens') ||
|
|
159
|
+
lower.includes('maximum context length') ||
|
|
160
|
+
lower.includes('tokens per min')
|
|
161
|
+
);
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
// Create a compact representation of a JOE object for use in slim payloads.
|
|
165
|
+
function slimJOEObject(item) {
|
|
166
|
+
if (!item || typeof item !== 'object') return item;
|
|
167
|
+
const name = item.name || item.title || item.label || item.email || item.slug || item._id || '';
|
|
168
|
+
const info = item.info || item.description || item.summary || '';
|
|
169
|
+
return {
|
|
170
|
+
_id: item._id,
|
|
171
|
+
itemtype: item.itemtype,
|
|
172
|
+
name: name,
|
|
173
|
+
info: info
|
|
174
|
+
};
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
// Given an `understandObject` result, produce a slimmed version:
|
|
178
|
+
// - keep `object` as-is
|
|
179
|
+
// - keep `flattened` for the main object (depth-limited) if present
|
|
180
|
+
// - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
|
|
181
|
+
// - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
|
|
182
|
+
function slimUnderstandObjectResult(result) {
|
|
183
|
+
if (!result || typeof result !== 'object') return result;
|
|
184
|
+
const out = {
|
|
185
|
+
_id: result._id,
|
|
186
|
+
itemtype: result.itemtype,
|
|
187
|
+
object: result.object,
|
|
188
|
+
// retain main flattened view if available; this is typically much smaller
|
|
189
|
+
flattened: result.flattened || null,
|
|
190
|
+
schemas: result.schemas || {},
|
|
191
|
+
tags: result.tags || {},
|
|
192
|
+
statuses: result.statuses || {},
|
|
193
|
+
slim: true
|
|
194
|
+
};
|
|
195
|
+
if (Array.isArray(result.related)) {
|
|
196
|
+
out.related = result.related.map(function (rel) {
|
|
197
|
+
if (!rel) return rel;
|
|
198
|
+
const base = rel.object || {};
|
|
199
|
+
const slim = slimJOEObject(base);
|
|
200
|
+
return {
|
|
201
|
+
field: rel.field,
|
|
202
|
+
_id: slim && slim._id || rel._id,
|
|
203
|
+
itemtype: slim && slim.itemtype || rel.itemtype,
|
|
204
|
+
object: slim
|
|
205
|
+
};
|
|
206
|
+
});
|
|
207
|
+
} else {
|
|
208
|
+
out.related = [];
|
|
209
|
+
}
|
|
210
|
+
return out;
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
// Walk the messages array and, for any system message containing a JSON payload
|
|
214
|
+
// of the form { "tool": "understandObject", "result": {...} }, replace the
|
|
215
|
+
// result with a slimmed version to reduce token count. Returns a new array; if
|
|
216
|
+
// nothing was changed, returns the original array.
|
|
217
|
+
function shrinkUnderstandObjectMessagesForTokens(messages) {
|
|
218
|
+
if (!Array.isArray(messages)) return messages;
|
|
219
|
+
let changed = false;
|
|
220
|
+
const shrunk = messages.map(function (msg) {
|
|
221
|
+
if (!msg || msg.role !== 'system') return msg;
|
|
222
|
+
if (typeof msg.content !== 'string') return msg;
|
|
223
|
+
try {
|
|
224
|
+
const parsed = JSON.parse(msg.content);
|
|
225
|
+
if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
|
|
226
|
+
return msg;
|
|
227
|
+
}
|
|
228
|
+
const slimmed = slimUnderstandObjectResult(parsed.result);
|
|
229
|
+
changed = true;
|
|
230
|
+
return {
|
|
231
|
+
...msg,
|
|
232
|
+
content: JSON.stringify({ tool: 'understandObject', result: slimmed })
|
|
233
|
+
};
|
|
234
|
+
} catch (_e) {
|
|
235
|
+
return msg;
|
|
236
|
+
}
|
|
237
|
+
});
|
|
238
|
+
return changed ? shrunk : messages;
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
/**
|
|
242
|
+
* runWithTools
|
|
243
|
+
*
|
|
244
|
+
* Single orchestration function for calling the OpenAI Responses API
|
|
245
|
+
* with optional tools (sourced from a JOE `ai_assistant`), handling
|
|
246
|
+
* tool calls via MCP, and issuing a follow-up model call with the
|
|
247
|
+
* tool results injected.
|
|
248
|
+
*
|
|
249
|
+
* Inputs (opts):
|
|
250
|
+
* - openai: OpenAI client instance
|
|
251
|
+
* - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
|
|
252
|
+
* - systemText: string of system / instructions text
|
|
253
|
+
* - messages: array of { role, content } for the conversation so far
|
|
254
|
+
* - assistant: JOE `ai_assistant` object (may contain `tools`)
|
|
255
|
+
* - req: Express request (passed into MCP tools as context)
|
|
256
|
+
*
|
|
257
|
+
* Returns:
|
|
258
|
+
* - { response, finalText, messages, toolCalls }
|
|
259
|
+
* where `finalText` is the assistant-facing text (from output_text)
|
|
260
|
+
* and `messages` is the possibly-extended message list including
|
|
261
|
+
* any synthetic `tool` messages.
|
|
262
|
+
*/
|
|
263
|
+
async function runWithTools(opts) {
|
|
264
|
+
const openai = opts.openai;
|
|
265
|
+
const model = opts.model;
|
|
266
|
+
const systemText = opts.systemText || "";
|
|
267
|
+
const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
|
|
268
|
+
const assistant = opts.assistant || null;
|
|
269
|
+
const req = opts.req;
|
|
270
|
+
const attachmentsMode = opts.attachments_mode || null;
|
|
271
|
+
const openaiFileIds = opts.openai_file_ids || null;
|
|
272
|
+
|
|
273
|
+
// Normalize tools: in many schemas tools may be stored as a JSON string;
|
|
274
|
+
// here we accept either an array or a JSON-stringified array.
|
|
275
|
+
let tools = null;
|
|
276
|
+
if (assistant && assistant.tools) {
|
|
277
|
+
if (Array.isArray(assistant.tools)) {
|
|
278
|
+
tools = assistant.tools;
|
|
279
|
+
} else if (typeof assistant.tools === 'string') {
|
|
280
|
+
try {
|
|
281
|
+
const parsed = JSON.parse(assistant.tools);
|
|
282
|
+
if (Array.isArray(parsed)) {
|
|
283
|
+
tools = parsed;
|
|
284
|
+
}
|
|
285
|
+
} catch (e) {
|
|
286
|
+
console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
// Normalize tool definitions for the Responses API. The assistant UI
|
|
291
|
+
// uses the Assistants-style shape ({ type:'function', function:{...} }),
|
|
292
|
+
// but Responses expects the name/description/parameters at the top level:
|
|
293
|
+
// { type:'function', name:'x', description:'...', parameters:{...} }
|
|
294
|
+
if (Array.isArray(tools)) {
|
|
295
|
+
tools = tools.map(function (t) {
|
|
296
|
+
if (t && t.type === 'function' && t.function && !t.name) {
|
|
297
|
+
const fn = t.function || {};
|
|
298
|
+
return {
|
|
299
|
+
type: 'function',
|
|
300
|
+
name: fn.name,
|
|
301
|
+
description: fn.description,
|
|
302
|
+
parameters: fn.parameters || {}
|
|
303
|
+
};
|
|
304
|
+
}
|
|
305
|
+
return t;
|
|
306
|
+
});
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
// No tools configured – do a simple single Responses call.
|
|
310
|
+
if (!tools) {
|
|
311
|
+
const resp = await openai.responses.create({
|
|
312
|
+
model: model,
|
|
313
|
+
instructions: systemText,
|
|
314
|
+
input: messages
|
|
315
|
+
});
|
|
316
|
+
return {
|
|
317
|
+
response: resp,
|
|
318
|
+
finalText: resp.output_text || "",
|
|
319
|
+
messages: messages,
|
|
320
|
+
toolCalls: []
|
|
321
|
+
};
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
// Step 1: call the model with tools enabled.
|
|
325
|
+
let firstPayload = {
|
|
326
|
+
model: model,
|
|
327
|
+
instructions: systemText,
|
|
328
|
+
input: messages,
|
|
329
|
+
tools: tools,
|
|
330
|
+
tool_choice: "auto"
|
|
331
|
+
};
|
|
332
|
+
if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
|
|
333
|
+
try{
|
|
334
|
+
firstPayload = await attachFilesToResponsesPayload(openai, firstPayload, {
|
|
335
|
+
attachments_mode: attachmentsMode,
|
|
336
|
+
openai_file_ids: openaiFileIds
|
|
337
|
+
});
|
|
338
|
+
}catch(e){
|
|
339
|
+
console.warn('[chatgpt] runWithTools attachments failed; continuing without attachments', e && e.message || e);
|
|
340
|
+
}
|
|
341
|
+
}
|
|
342
|
+
const first = await openai.responses.create(firstPayload);
|
|
343
|
+
|
|
344
|
+
const toolCalls = extractToolCalls(first);
|
|
345
|
+
|
|
346
|
+
// If the model didn't decide to use tools, just return the first answer.
|
|
347
|
+
if (!toolCalls.length) {
|
|
348
|
+
return {
|
|
349
|
+
response: first,
|
|
350
|
+
finalText: first.output_text || "",
|
|
351
|
+
messages: messages,
|
|
352
|
+
toolCalls: []
|
|
353
|
+
};
|
|
354
|
+
}
|
|
355
|
+
|
|
356
|
+
// Step 2: execute each tool call via MCP and append tool results.
|
|
357
|
+
for (let i = 0; i < toolCalls.length; i++) {
|
|
358
|
+
const tc = toolCalls[i];
|
|
359
|
+
try {
|
|
360
|
+
const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
|
|
361
|
+
messages.push({
|
|
362
|
+
// Responses API does not support a "tool" role in messages.
|
|
363
|
+
// We inject tool outputs as a synthetic system message so
|
|
364
|
+
// the model can see the results without affecting the
|
|
365
|
+
// user/assistant turn structure.
|
|
366
|
+
role: "system",
|
|
367
|
+
content: JSON.stringify({ tool: tc.name, result: result })
|
|
368
|
+
});
|
|
369
|
+
} catch (e) {
|
|
370
|
+
console.error("[chatgpt] MCP tool error in runWithTools:", e);
|
|
371
|
+
messages.push({
|
|
372
|
+
role: "system",
|
|
373
|
+
content: JSON.stringify({
|
|
374
|
+
tool: tc.name,
|
|
375
|
+
error: e && e.message || "Tool execution failed"
|
|
376
|
+
})
|
|
377
|
+
});
|
|
378
|
+
}
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
// Step 3: ask the model again with tool outputs included.
|
|
382
|
+
let finalMessages = messages;
|
|
383
|
+
let second;
|
|
384
|
+
try {
|
|
385
|
+
let secondPayload = {
|
|
386
|
+
model: model,
|
|
387
|
+
instructions: systemText,
|
|
388
|
+
input: finalMessages
|
|
389
|
+
};
|
|
390
|
+
if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
|
|
391
|
+
try{
|
|
392
|
+
secondPayload = await attachFilesToResponsesPayload(openai, secondPayload, {
|
|
393
|
+
attachments_mode: attachmentsMode,
|
|
394
|
+
openai_file_ids: openaiFileIds
|
|
395
|
+
});
|
|
396
|
+
}catch(e){
|
|
397
|
+
console.warn('[chatgpt] runWithTools second-call attachments failed; continuing without attachments', e && e.message || e);
|
|
398
|
+
}
|
|
399
|
+
}
|
|
400
|
+
second = await openai.responses.create(secondPayload);
|
|
401
|
+
} catch (e) {
|
|
402
|
+
if (isTokenLimitError(e)) {
|
|
403
|
+
console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
|
|
404
|
+
const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
|
|
405
|
+
// If nothing was shrunk, just rethrow the original error.
|
|
406
|
+
if (shrunk === finalMessages) {
|
|
407
|
+
throw e;
|
|
408
|
+
}
|
|
409
|
+
finalMessages = shrunk;
|
|
410
|
+
// Retry once with the smaller payload; let any error bubble up.
|
|
411
|
+
let retryPayload = {
|
|
412
|
+
model: model,
|
|
413
|
+
instructions: systemText,
|
|
414
|
+
input: finalMessages
|
|
415
|
+
};
|
|
416
|
+
if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
|
|
417
|
+
try{
|
|
418
|
+
retryPayload = await attachFilesToResponsesPayload(openai, retryPayload, {
|
|
419
|
+
attachments_mode: attachmentsMode,
|
|
420
|
+
openai_file_ids: openaiFileIds
|
|
421
|
+
});
|
|
422
|
+
}catch(e2){
|
|
423
|
+
console.warn('[chatgpt] runWithTools retry attachments failed; continuing without attachments', e2 && e2.message || e2);
|
|
424
|
+
}
|
|
425
|
+
}
|
|
426
|
+
second = await openai.responses.create(retryPayload);
|
|
427
|
+
} else {
|
|
428
|
+
throw e;
|
|
429
|
+
}
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
return {
|
|
433
|
+
response: second,
|
|
434
|
+
finalText: second.output_text || "",
|
|
435
|
+
messages: finalMessages,
|
|
436
|
+
toolCalls: toolCalls
|
|
437
|
+
};
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
/**
 * newClient
 *
 * Creates a fresh OpenAI SDK client using the configured API key.
 *
 * NOTE: an earlier (now removed) revision returned `{ errors: ... }` when no
 * key was configured; this version does not — callers that still check
 * `client.errors` will never see it set, and an invalid/missing key surfaces
 * as an API error on the first request instead.
 *
 * @returns {OpenAI} a new OpenAI client instance
 */
function newClient() {
  return new OpenAI({ apiKey: getAPIKey() });
}
|
|
453
|
+
|
|
454
|
+
// Safely call Responses API with optional temperature/top_p.
// If the model rejects these parameters, strip and retry once.
//
// Some models reject sampling parameters with an "Unsupported parameter" /
// "Unknown parameter" error; rather than failing the whole request we retry
// one time with temperature/top_p removed. Any other error (or a second
// failure) is rethrown unchanged.
async function safeResponsesCreate(openai, payload) {
  try {
    return await openai.responses.create(payload);
  } catch (e) {
    try {
      // Null-safe message extraction (the original could itself throw here
      // when `e` was null, silently skipping the retry path).
      const msg = ((e && e.error && e.error.message) || (e && e.message) || '').toLowerCase();
      const rejects = (param) =>
        (msg.includes('unsupported parameter') || msg.includes('unknown parameter')) &&
        msg.includes(param);
      if (rejects('temperature') || rejects('top_p')) {
        const stripped = Object.assign({}, payload);
        delete stripped.temperature;
        delete stripped.top_p;
        console.warn('[chatgpt] Retrying without temperature/top_p due to model rejection');
        return await openai.responses.create(stripped);
      }
    } catch (_e) { /* fallthrough to rethrow the original error */ }
    throw e;
  }
}
|
|
477
|
+
|
|
478
|
+
// Ensure a vector store exists with the provided file_ids indexed; returns { vectorStoreId }
// An ephemeral store is created per run (could be optimized to reuse/persist later).
async function ensureVectorStoreForFiles(fileIds = []) {
  const openai = newClient();
  const store = await openai.vectorStores.create({ name: 'JOE Prompt Run ' + Date.now() });
  const vectorStoreId = store.id;

  // Link up to 10 files by id; individual failures are logged and skipped.
  const idsToLink = (fileIds || []).slice(0, 10);
  for (const fileId of idsToLink) {
    try {
      await openai.vectorStores.files.create(vectorStoreId, { file_id: fileId });
    } catch (err) {
      console.warn('[chatgpt] vectorStores.files.create failed for', fileId, err && err.message || err);
    }
  }

  // Best-effort: poll every 500ms until no linked file is still processing,
  // or until the 8s budget runs out. Polling errors are non-fatal.
  const deadline = Date.now() + 8000;
  try {
    while (Date.now() < deadline) {
      const listing = await openai.vectorStores.files.list(vectorStoreId, { limit: 100 });
      const files = (listing && listing.data) || [];
      const stillProcessing = files.some(function (f) {
        return f.status && f.status !== 'completed';
      });
      if (!stillProcessing) { break; }
      await new Promise(function (resolve) { setTimeout(resolve, 500); });
    }
  } catch (_err) { /* non-fatal */ }

  return { vectorStoreId };
}
|
|
506
|
+
|
|
507
|
+
// ---------------- OpenAI Files helpers ----------------
/**
 * attachFilesToResponsesPayload
 *
 * Shared helper that wires OpenAI `responses.create` payloads with file
 * attachments consistently for both MCP and non-MCP paths. The payload is
 * mutated in place and also returned.
 *
 * Modes (opts.attachments_mode):
 *  - 'file_search':
 *      ensures a temporary vector store via ensureVectorStoreForFiles, adds a
 *      `file_search` tool (if absent) and sets
 *      payload.tool_resources.file_search.vector_store_ids; `input` is left
 *      untouched.
 *  - 'direct' (default):
 *      converts a string `input` into an `input_text` part (an array input is
 *      JSON-stringified), appends up to 10 `{ type:'input_file', file_id }`
 *      parts, and sets payload.input = [{ role:'user', content: parts }].
 *
 * File-only by design: instructions and other payload fields are untouched.
 * Returns the payload unchanged when no file ids are provided.
 */
async function attachFilesToResponsesPayload(openai, payload, opts) {
  const options = opts || {};
  const mode = options.attachments_mode || 'direct';
  const fileIds = options.openai_file_ids || [];
  if (!Array.isArray(fileIds) || fileIds.length === 0) {
    return payload;
  }

  if (mode === 'file_search') {
    const { vectorStoreId } = await ensureVectorStoreForFiles(fileIds);
    payload.tools = payload.tools || [];
    const alreadyWired = payload.tools.some(function (t) {
      return t && t.type === 'file_search';
    });
    if (!alreadyWired) {
      payload.tools.push({ type: 'file_search' });
    }
    payload.tool_resources = Object.assign({}, payload.tool_resources, {
      file_search: { vector_store_ids: [vectorStoreId] }
    });
    return payload;
  }

  // Default: direct context stuffing using input_text + input_file parts.
  const parts = [];
  if (typeof payload.input === 'string' && payload.input.trim().length) {
    parts.push({ type: 'input_text', text: String(payload.input) });
  } else if (Array.isArray(payload.input)) {
    // Caller supplied messages as input — preserve them by flattening into
    // a single input_text part where possible.
    try {
      const serialized = JSON.stringify(payload.input);
      if (serialized && serialized.length) {
        parts.push({ type: 'input_text', text: serialized });
      }
    } catch (_e) { /* ignore unserializable input */ }
  }
  for (const fid of fileIds.slice(0, 10)) {
    if (fid) {
      parts.push({ type: 'input_file', file_id: fid });
    }
  }
  payload.input = [{ role: 'user', content: parts }];
  return payload;
}
|
|
568
|
+
/**
 * uploadFileFromBuffer
 *
 * Writes `buffer` to a temp file and uploads it to OpenAI Files, returning
 * `{ id, purpose }`. The temp file is removed (best-effort) after upload.
 *
 * FIX: the temp path now uses path.basename(filename), so a filename that
 * contains path separators or ".." can no longer write outside os.tmpdir().
 *
 * @param {Buffer} buffer - file contents
 * @param {string} [filename] - desired file name; defaults to upload_<ts>
 * @param {string} [contentType] - currently unused; kept for interface compatibility
 * @param {string} [purpose] - OpenAI file purpose; defaults to 'assistants'
 * @returns {Promise<{id:string, purpose:string}>}
 */
async function uploadFileFromBuffer(buffer, filename, contentType, purpose) {
  const openai = newClient();
  const usePurpose = purpose || 'assistants';
  // basename() guards against path traversal via a hostile filename.
  const safeName = path.basename(filename || ('upload_' + Date.now()));
  const tmpPath = path.join(os.tmpdir(), safeName);
  await fs.promises.writeFile(tmpPath, buffer);
  try {
    // openai.files.create accepts a readable stream; the uploaded file's
    // name is derived from the stream's path (i.e. safeName).
    const created = await openai.files.create({
      purpose: usePurpose,
      file: fs.createReadStream(tmpPath)
    });
    return { id: created.id, purpose: usePurpose };
  } finally {
    // best-effort cleanup
    fs.promises.unlink(tmpPath).catch(() => {});
  }
}
|
|
588
|
+
|
|
589
|
+
// Expose a helper that other plugins can call in-process
this.filesUploadFromBufferHelper = async function ({ buffer, filename, contentType, purpose }) {
  // Reject empty or missing payloads up front.
  if (!buffer || buffer.length === 0) {
    throw new Error('Missing buffer');
  }
  const usePurpose = purpose || 'assistants';
  return uploadFileFromBuffer(buffer, filename, contentType, usePurpose);
};
|
|
596
|
+
|
|
597
|
+
// Public endpoint to retry OpenAI upload from a URL (e.g., S3 object URL)
// Downloads the object and pushes the bytes through the shared uploader.
// Always resolves to { success, ... }; failures are reported, not thrown.
this.filesRetryFromUrl = async function (data, req, res) {
  try {
    const { default: got } = await import('got');
    const body = data || {};
    const url = body.url || body.location;
    if (!url) {
      return { success: false, error: 'Missing url' };
    }
    const filename = body.filename || url.split('/').pop() || ('upload_' + Date.now());
    const contentType = body.contentType || undefined;
    const download = await got(url, { responseType: 'buffer' });
    const created = await uploadFileFromBuffer(download.body, filename, contentType, 'assistants');
    return { success: true, openai_file_id: created.id, openai_purpose: created.purpose };
  } catch (e) {
    return { success: false, error: e && e.message || 'Retry upload failed' };
  }
};
|
|
616
|
+
// Connectivity test endpoint: sends a canned prompt to ChatGPT and
// (best-effort) persists the reply as an ai_response record.
this.testPrompt = async function (data, req, res) {
  const TEST_PROMPT = 'Tell me a story about JOE: the json object editor in under 256 chars.';
  let payload;
  try {
    payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  const client = newClient();
  if (client.errors) {
    return { errors: client.errors };
  }
  try {
    const chatCompletion = await client.chat.completions.create({
      messages: [{ role: 'user', content: TEST_PROMPT }],
      model: 'gpt-4o',
    });
    coloredLog(chatCompletion);
    const choice = chatCompletion.choices && chatCompletion.choices[0];
    const text = (choice && choice.message && choice.message.content) || '';

    // If the reply happens to be JSON, keep a parsed copy as well.
    let parsed = null;
    try {
      const jsonText = extractJsonText(text);
      if (jsonText) {
        parsed = JSON.parse(jsonText);
      }
    } catch (_e) {
      parsed = null;
    }

    // Optionally persist as ai_response; failures here never break the reply.
    try {
      let creator_type = null;
      let creator_id = null;
      try {
        const u = req && req.User;
        if (u && u._id) {
          creator_type = 'user';
          creator_id = u._id;
        }
      } catch (_e) {}
      const aiResponse = {
        itemtype: 'ai_response',
        name: 'Test Prompt → ChatGPT',
        response_type: 'testPrompt',
        response: text,
        response_json: parsed,
        response_id: chatCompletion.id || '',
        user_prompt: (payload && payload.data && payload.data.prompt) || TEST_PROMPT,
        model_used: 'gpt-4o',
        created: (new Date()).toISOString(),
        creator_type: creator_type,
        creator_id: creator_id
      };
      JOE.Storage.save(aiResponse, 'ai_response', function () {}, { history: false, user: (req && req.User) || { name: 'system' } });
    } catch (_e) { /* best-effort only */ }

    return { payload, chatCompletion, content: text };
  } catch (error) {
    if (error.status === 429) {
      return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
    }
    return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
  }
};
|
|
677
|
+
|
|
678
|
+
/**
 * sendInitialConsultTranscript
 *
 * Flow:
 *  1. look up the ai_prompt and business records by id,
 *  2. read initial_transcript_url from the business,
 *  3. fetch the transcript content from Google Docs,
 *  4. send template + transcript to ChatGPT and persist the reply
 *     as an ai_response.
 *
 * FIX: guards for missing business/prompt records (an unknown id previously
 * crashed with a TypeError on undefined), and a null-safe error branch when
 * the transcript fetch returns a falsy value.
 *
 * @param {object} data - expects { business, ai_prompt } record ids
 * @param {object} req - express-style request (req.params.method is recorded)
 * @param {object} res - used for jsonp error replies
 */
this.sendInitialConsultTranscript = async function (data, req, res) {
  coloredLog("sendInitialConsultTranscript");
  var payload;
  try {
    payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  var businessOBJ = JOE.Data.business.find(b => b._id == data.business);
  var promptOBJ = JOE.Data.ai_prompt.find(p => p._id == data.ai_prompt);
  if (!businessOBJ) {
    return res.jsonp({ error: 'Business not found: ' + data.business });
  }
  if (!promptOBJ) {
    return res.jsonp({ error: 'AI prompt not found: ' + data.ai_prompt });
  }

  // See if there is an initial_transcript_url property on that object
  const transcriptUrl = businessOBJ.initial_transcript_url;
  if (!transcriptUrl) {
    return res.jsonp({ error: 'No initial transcript URL found' });
  }

  // Get the content of the file from Google Docs
  const transcriptContent = await getGoogleDocContent(transcriptUrl);
  if (!transcriptContent || transcriptContent.error) {
    return res.jsonp({
      error: (transcriptContent && transcriptContent.error && transcriptContent.error.message) || 'Failed to fetch transcript content'
    });
  }
  // Token count is informational only (logged and echoed back in payload).
  const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
  payload.tokenCount = tokenCount;
  coloredLog("token count: " + tokenCount);

  // Send the content to ChatGPT, with the template property of the prompt object
  const client = newClient();

  const chatResponse = await client.chat.completions.create({
    messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
    model: 'gpt-4o',
  });

  // Get the response
  const chatContent = chatResponse.choices[0].message.content;
  const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
  // Save the response
  await saveAIResponse({
    name: responseName,
    business: data.business,
    ai_prompt: data.ai_prompt,
    response: chatContent,
    payload,
    prompt_method: req.params.method
  }, req && req.User);
  coloredLog("response saved -" + responseName);
  return {
    payload,
    businessOBJ,
    promptOBJ,
    chatContent,
    responseName
  };
}
|
|
744
|
+
|
|
745
|
+
/**
 * getGoogleDocContent
 *
 * Fetches a Google Doc's plain-text body via the Docs API. Paragraph text
 * runs are concatenated; the consultant's name is masked to '[EN]' and
 * WebVTT-style timestamps ("hh:mm:ss.mmm --> hh:mm:ss.mmm") to '-ts-' to
 * reduce token usage downstream.
 *
 * BUGFIX: previously passed an undefined variable `google_auth` to
 * google.docs(), throwing a ReferenceError before any request was made;
 * the GoogleAuth instance created here is now actually used. The unused
 * GOOGLE_DOCS_API_KEY lookup was removed.
 *
 * @param {string} docUrl - Google Docs URL containing "/d/<docId>"
 * @returns {Promise<string|{error: Error}>} text content, or { error } on failure
 */
async function getGoogleDocContent(docUrl) {
  try {
    const auth = new google.auth.GoogleAuth({
      scopes: ['https://www.googleapis.com/auth/documents.readonly']
    });
    const docs = google.docs({ version: 'v1', auth });
    const docId = extractDocIdFromUrl(docUrl);
    const doc = await docs.documents.get({ documentId: docId });

    // Flatten structural elements into plain text, masking names/timestamps.
    const content = doc.data.body.content.map(element => {
      if (element.paragraph && element.paragraph.elements) {
        return element.paragraph.elements.map(
          e => e.textRun
            ? e.textRun.content
                .replace(/Euron Nicholson/g, '[EN]')
                .replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
            : ''
        ).join('');
      }
      return '';
    }).join('\n');

    return content;
  } catch (error) {
    console.error('Error fetching Google Doc content:', error);
    return { error };
  }
}
|
|
775
|
+
/**
 * countTokens
 *
 * Counts model tokens in `text` using a tiktoken encoder.
 *
 * FIX: the encoder is now released in `finally` — tiktoken encoders are
 * WASM-backed and leak memory unless free() is called after use. The free()
 * call is guarded in case the encoder implementation lacks it.
 *
 * @param {string} text - text to tokenize
 * @param {string} [model='gpt-4o'] - model whose encoding to use
 * @returns {number} token count
 */
function countTokens(text, model = 'gpt-4o') {
  const enc = encoding_for_model(model);
  try {
    return enc.encode(text).length;
  } finally {
    if (typeof enc.free === 'function') {
      enc.free();
    }
  }
}
|
|
780
|
+
// Pull the document id out of a Google Docs URL ("/d/<id>/..."); null if absent.
function extractDocIdFromUrl(url) {
  const docIdPattern = /\/d\/([a-zA-Z0-9-_]+)/;
  const matched = docIdPattern.exec(url);
  return matched === null ? null : matched[1];
}
|
|
784
|
+
|
|
785
|
+
// Persist a generic ai_response record through JOE.Storage.
// Failures are logged and swallowed — callers treat this save as best-effort.
async function saveAIResponse(data, user) {
  try {
    let creator_type = null;
    let creator_id = null;
    try {
      if (user && user._id) {
        creator_type = 'user';
        creator_id = user._id;
      }
    } catch (_e) { /* leave creator fields null */ }

    const record = {
      name: data.name,
      itemtype: 'ai_response',
      business: data.business,
      ai_prompt: data.ai_prompt,
      response: data.response,
      payload: data.payload,
      prompt_method: data.prompt_method,
      created: (new Date()).toISOString(),
      _id: cuid(),
      creator_type: creator_type,
      creator_id: creator_id
      // Add any other fields you want to save
    };

    // Adapt the callback-style Storage.save to a promise so callers can await.
    await new Promise((resolve, reject) => {
      JOE.Storage.save(record, 'ai_response', (err, result) => {
        if (err) {
          coloredLog('Error saving AI response: ' + err);
          reject(err);
          return;
        }
        coloredLog('AI response saved successfully');
        resolve(result);
      });
    });
  } catch (error) {
    coloredLog('Error in saveAIResponse: ' + error);
  }
}
|
|
824
|
+
|
|
825
|
+
// Normalize model output that should contain JSON. Models often wrap JSON in
// markdown fences (```json ... ```) and may add prose around it. This helper
// strips fences and, when prose remains, slices from the first '{'/'[' to the
// last '}'/']' so JSON.parse has the best chance of succeeding.
function extractJsonText(raw) {
  if (!raw) { return ''; }
  let text = String(raw).trim();

  // Prefer the contents of a fenced block when one is present (```json wins).
  const jsonFenceAt = text.indexOf('```json');
  const fenceAt = jsonFenceAt !== -1 ? jsonFenceAt : text.indexOf('```');
  if (fenceAt !== -1) {
    const newlineAfterFence = text.indexOf('\n', fenceAt);
    text = newlineAfterFence === -1
      ? text.substring(fenceAt + 3)
      : text.substring(newlineAfterFence + 1);
    const closingFence = text.lastIndexOf('```');
    if (closingFence !== -1) {
      text = text.substring(0, closingFence);
    }
    text = text.trim();
  }

  // If prose still surrounds the JSON, isolate the bracketed span.
  if (text[0] !== '{' && text[0] !== '[') {
    const braceAt = text.indexOf('{');
    const bracketAt = text.indexOf('[');
    let openAt;
    if (braceAt === -1) {
      openAt = bracketAt;
    } else if (bracketAt === -1) {
      openAt = braceAt;
    } else {
      openAt = Math.min(braceAt, bracketAt);
    }
    const closeAt = Math.max(text.lastIndexOf('}'), text.lastIndexOf(']'));
    if (openAt !== -1 && closeAt !== -1 && closeAt > openAt) {
      text = text.slice(openAt, closeAt + 1);
    }
  }
  return text.trim();
}
|
|
863
|
+
|
|
864
|
+
// Autofill feature (Responses API; supports assistant_id or model)
//
// Given a target object, its schema, and a list of field names, asks the
// model to produce a strict-JSON { patch } with suggested values for ONLY
// those fields. Optionally runs with MCP tools (read-only by default) and/or
// persists the result as an ai_response (or other) record.
//
// FIXES:
//  - Non-MCP Responses calls now go through safeResponsesCreate so models
//    that reject `temperature` are retried without it, instead of failing.
//  - The system prompt lines were joined with '\\n' (a literal backslash-n),
//    collapsing all rules onto one line; real newlines are used now.
this.autofill = async function (data, req, res) {
  const startedAt = Date.now();
  try {
    const body = data || {};
    const objectId = body.object_id || body._id;
    const object = body.object || $J.get(objectId);
    const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
    const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
    const rawFields = body.fields || body.field;
    const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
    const userPrompt = body.prompt || '';
    const assistantId = body.assistant_id || null;

    if (!object) {
      return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
    }
    if (!schemaName) {
      return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
    }
    if (!fields.length) {
      return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
    }

    const flattened = JOE.Utils.flattenObject(object._id);
    const systemText = [
      'You are JOE (Json Object Editor) assistant.',
      'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
      '- Respect field types (text, number, arrays, enums, references).',
      '- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
      '- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
      '- If a field is an array, return an array of values.',
      '- Never modify unrelated fields.',
      '- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
      '- If you lack sufficient information, return an empty patch.'
    ].join('\n');

    const schemaForContext = schemaSummary || schemaFull || {};
    const userInput = JSON.stringify({
      action: 'autofill_fields',
      target_schema: schemaName,
      requested_fields: fields,
      user_prompt: userPrompt,
      object_context: flattened,
      schema_context: schemaForContext
    }, null, ' ');

    const openai = newClient();
    const model = body.model || 'gpt-4o-mini';

    // Normalize MCP options for autofill. By default, when mcp_enabled is
    // true we expose the read-only toolset, which is safe for field
    // suggestions. Callers can override toolset / selected tools.
    const mcpEnabled = !!body.mcp_enabled;
    const mcpToolset = body.mcp_toolset || 'read-only';
    const mcpSelected = Array.isArray(body.mcp_selected_tools) ? body.mcp_selected_tools : null;
    const mcpInstructionsMode = body.mcp_instructions_mode || 'auto';

    let response;
    let mcpToolCalls = [];
    if (mcpEnabled) {
      // MCP path: delegate tool-call orchestration to runWithTools.
      const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
      const toolsForModel = MCP.getToolDefinitions(toolNames);
      const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
      const systemTextWithMcp = [systemText, mcpText || ''].join('\n').trim();

      const messages = [{ role: 'user', content: userInput }];

      const runResult = await runWithTools({
        openai: openai,
        model: model,
        systemText: systemTextWithMcp,
        messages: messages,
        assistant: { tools: toolsForModel },
        req: req
      });
      response = runResult.response;
      if (runResult && Array.isArray(runResult.toolCalls)) {
        mcpToolCalls = runResult.toolCalls.map(function (tc) {
          return {
            name: tc && (tc.name || tc.function_name || tc.tool_name),
            arguments: tc && tc.arguments
          };
        }).filter(function (x) { return x && x.name; });
      }
    } else {
      // Plain path: request text output and instruct the model to return a
      // strict JSON object; text + JSON.parse is more robust across SDK
      // versions than the json_schema response_format.
      const requestBase = {
        temperature: 0.2,
        instructions: systemText,
        input: userInput
      };
      // Optional web_search tool: if the caller sets allow_web truthy, expose
      // the built-in web_search capability and let the model decide when to
      // call it.
      if (body.allow_web) {
        coloredLog("allowing web search");
        requestBase.tools = [{ type: 'web_search' }];
        requestBase.tool_choice = 'auto';
      }

      // safeResponsesCreate strips temperature/top_p and retries once when
      // the chosen model rejects sampling parameters.
      if (assistantId) {
        response = await safeResponsesCreate(openai, { assistant_id: assistantId, ...requestBase });
      } else {
        response = await safeResponsesCreate(openai, { model, ...requestBase });
      }
    }

    // Extract the model's text output; fall back to scanning response.output
    // for older/alternate response shapes.
    let textOut = '';
    try { textOut = response.output_text || ''; } catch (_e) {}
    coloredLog("textOut: " + textOut);
    if (!textOut && response && Array.isArray(response.output)) {
      for (let i = 0; i < response.output.length; i++) {
        const item = response.output[i];
        if (item && item.type === 'message' && item.content && Array.isArray(item.content)) {
          const textPart = item.content.find(function (c) { return c.type === 'output_text' || c.type === 'text'; });
          if (textPart && (textPart.text || textPart.output_text)) {
            textOut = textPart.text || textPart.output_text;
            break;
          }
        }
      }
    }

    // Parse the JSON patch (tolerating fences/prose via extractJsonText).
    let patch = {};
    try {
      const jsonText = extractJsonText(textOut);
      const parsed = JSON.parse(jsonText || '{}');
      patch = parsed.patch || {};
    } catch (_e) {
      console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
    }
    coloredLog("patch: " + JSON.stringify(patch));

    // Keep only the fields the caller actually asked for.
    const filteredPatch = {};
    fields.forEach(function (f) {
      if (Object.prototype.hasOwnProperty.call(patch, f)) {
        filteredPatch[f] = patch[f];
      }
    });
    // If we got no fields back on the first attempt, retry once before
    // giving up. Avoid infinite loops by marking a retry flag.
    if (!Object.keys(filteredPatch).length && !body._retry) {
      coloredLog('[autofill] empty patch, retrying once');
      const retryBody = Object.assign({}, body, { _retry: true });
      return await self.autofill(retryBody, req, res);
    }

    // Optional save
    let savedItem = null;
    if (body.save_history || body.save_itemtype) {
      const targetItemtype = body.save_itemtype || 'ai_response';
      if (JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[targetItemtype]) {
        const isAiResponse = (targetItemtype === 'ai_response');
        const toolNamesForSave = mcpEnabled ? MCP.getToolNamesForToolset(mcpToolset, mcpSelected) : [];
        const baseSave = {
          itemtype: targetItemtype,
          name: `[${schemaName}] autofill → ${fields.join(', ')}`,
          object_id: object._id,
          target_schema: schemaName,
          fields,
          prompt: userPrompt,
          patch: filteredPatch,
          model,
          raw: { response, mcp_tools_used: mcpToolCalls }
        };
        if (isAiResponse) {
          // MCP provenance fields exist only on the ai_response schema.
          baseSave.mcp_enabled = mcpEnabled;
          baseSave.mcp_toolset = mcpToolset;
          baseSave.mcp_selected_tools = toolNamesForSave;
          baseSave.mcp_instructions_mode = mcpInstructionsMode;
          baseSave.mcp_tools_used = mcpToolCalls;
        }
        await new Promise(function (resolve) {
          JOE.Storage.save(baseSave, targetItemtype, function (_err, saved) {
            savedItem = saved || null;
            resolve();
          });
        });
      }
    }

    return {
      success: true,
      patch: filteredPatch,
      model,
      usage: response && response.usage,
      saved: !!savedItem,
      saved_item: savedItem,
      elapsed_ms: Date.now() - startedAt
    };
  } catch (e) {
    return { success: false, error: e && e.message || 'Unknown error' };
  }
};
|
|
1061
|
+
|
|
1062
|
+
/**
 * Stub endpoint that echoes the supplied prompt back as a simulated
 * ChatGPT reply via res.jsonp.
 *
 * @param {object} data - expects { prompt }
 * @param {object} req  - unused
 * @param {object} res  - response object; reply is written with res.jsonp
 * @returns {object} { use_callback: true } on success, or an error object
 */
this.getResponse = function (data, req, res) {
    try {
        const userPrompt = data.prompt;
        if (!userPrompt) {
            return { error: 'No prompt provided' };
        }
        // Simulated reply; no OpenAI call is made here.
        const reply = `ChatGPT response to: ${userPrompt}`;
        res.jsonp({ response: reply });
        // Tell the plugin framework the response was already written.
        return { use_callback: true };
    } catch (e) {
        return { errors: 'plugin error: ' + e, failedat: 'plugin' };
    }
};
|
|
1077
|
+
|
|
1078
|
+
/**
 * Render the default plugin payload as indented JSON for HTML display.
 * NOTE(review): the second argument ('') is not a valid replacer and is
 * ignored by JSON.stringify; the third argument is the indent string
 * (truncated to 10 characters by spec).
 */
this.html = function (data, req, res) {
    const payload = self.default(data, req);
    return JSON.stringify(payload, '', '\t\r\n <br/>');
};
|
|
1081
|
+
/* NEW AI RESPONSE API*/
|
|
1082
|
+
|
|
1083
|
+
/**
 * Execute a stored `ai_prompt` record end-to-end:
 *   1. Load the prompt via $J.get and resolve any `content_items`
 *      referenced in `params` into content_objects.
 *   2. Optionally run the prompt's helper `functions` module to build the
 *      final instructions/input (helpers can track referenced objects).
 *   3. Call OpenAI Responses — through runWithTools when the prompt has
 *      MCP enabled, otherwise via a single safeResponsesCreate call.
 *   4. Persist an `ai_response` record via saveAiResponseRefactor.
 *
 * @param {object} data - { ai_prompt, params?, openai_file_ids?, ai_assistant_id?, ... }
 * @param {object} req  - request; req.User is recorded on the saved response
 * @param {object} res  - unused; the result object is returned to the framework
 * @returns {Promise<object>} { success, ai_response_id, response, usage } or { error }
 */
this.executeJOEAiPrompt = async function(data, req, res) {
    const referencedObjectIds = []; // Track all objects touched during helper function
    try {
        const promptId = data.ai_prompt;
        // Support both payload shapes: { ai_prompt, params:{...}, ... } and flat
        const params = (data && (data.params || data)) || {};

        if (!promptId) {
            return { error: "Missing prompt_id." };
        }

        const prompt = await $J.get(promptId); // Use $J.get for consistency
        if (!prompt) {
            return { error: "Prompt not found." };
        }

        let instructions = prompt.instructions || "";
        let finalInstructions = instructions;
        let finalInput = '';
        // Pre-load all content_objects if content_items exist
        const contentObjects = {};

        if (prompt.content_items && Array.isArray(prompt.content_items)) {
            for (const content of prompt.content_items) {
                if (params[content.reference]) {
                    // FIX: await the lookup — $J.get is awaited above for the
                    // prompt itself; without the await a promise-returning
                    // $J.get would store a pending Promise into contentObjects
                    // and skip object tracking. Awaiting a plain value is a
                    // harmless no-op.
                    const obj = await $J.get(params[content.reference]);
                    if (obj) {
                        contentObjects[content.itemtype] = obj;

                        // Pre-track referenced object
                        if (obj._id && !referencedObjectIds.includes(obj._id)) {
                            referencedObjectIds.push(obj._id);
                        }
                    }
                }
            }
        }

        // Execute any helper functions if present
        if (prompt.functions) {
            const modFunc = JOE.Utils.requireFromString(prompt.functions, prompt._id);
            const helperResult = await modFunc({
                instructions,
                params,
                ai_prompt: prompt,
                content_objects: contentObjects,
                trackObject: (obj) => {
                    if (obj?._id && !referencedObjectIds.includes(obj._id)) {
                        referencedObjectIds.push(obj._id);
                    }
                }
            });

            if (typeof helperResult === 'object' && helperResult.error) {
                return { error: helperResult.error };
            }

            // Assume the result is { instructions, input }
            finalInstructions = helperResult.instructions || instructions;
            finalInput = helperResult.input;
        }

        // Build a compact uploaded_files header from any referenced objects that
        // have uploader-style files with OpenAI ids. This gives the model
        // explicit metadata about which files were attached and their roles so
        // prompts (like MCP Tokenize Client) can reason about "transcript"
        // vs "summary" sources instead of guessing from content alone.
        let uploadedFilesMeta = [];
        try {
            Object.keys(contentObjects || {}).forEach(function(itemtype){
                const obj = contentObjects[itemtype];
                if (!obj || typeof obj !== 'object') { return; }
                Object.keys(obj).forEach(function(field){
                    const val = obj[field];
                    if (!Array.isArray(val)) { return; }
                    val.forEach(function(f){
                        if (f && f.openai_file_id) {
                            uploadedFilesMeta.push({
                                itemtype: itemtype,
                                field: field,
                                name: f.filename || '',
                                role: f.file_role || null,
                                openai_file_id: f.openai_file_id
                            });
                        }
                    });
                });
            });
        } catch(_e) { /* best-effort only */ }
        if (uploadedFilesMeta.length) {
            try {
                const header = { uploaded_files: uploadedFilesMeta };
                if (finalInput && String(finalInput).trim().length) {
                    // Wrap existing input so the file metadata rides alongside it.
                    finalInput = JSON.stringify({
                        uploaded_files: uploadedFilesMeta,
                        input: finalInput
                    }, null, 2);
                } else {
                    finalInput = JSON.stringify(header, null, 2);
                }
            } catch(_e) { /* if JSON.stringify fails, leave finalInput as-is */ }
        }

        const openai = newClient(); // however your OpenAI client is created

        // Normalize MCP options from the ai_prompt record.
        const mcpEnabled = !!prompt.mcp_enabled;
        const mcpToolset = prompt.mcp_toolset || 'read-only';
        const mcpSelected = Array.isArray(prompt.mcp_selected_tools) ? prompt.mcp_selected_tools : null;
        const mcpInstructionsMode = prompt.mcp_instructions_mode || 'auto';

        // If MCP is enabled, prefer Responses+tools via runWithTools. Otherwise,
        // keep the existing single-call Responses behavior using prompt.tools.
        let response;
        let resolvedToolNames = null;
        let mcpToolCalls = [];
        if (mcpEnabled) {
            // Determine tool names from the configured toolset + overrides.
            const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
            resolvedToolNames = toolNames;
            const toolsForModel = MCP.getToolDefinitions(toolNames);

            // Build per-tool MCP instructions (short) and append to the existing instructions.
            const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
            const systemText = [finalInstructions || instructions || '']
                .concat(mcpText ? ['\n', mcpText] : [])
                .join('\n')
                .trim();

            const messages = [];
            if (finalInput && String(finalInput).trim().length) {
                messages.push({ role:'user', content:String(finalInput) });
            }
            // Ensure the Responses API always has some input when MCP is enabled.
            // For prompts that rely purely on system instructions, synthesize a
            // minimal user turn so the call remains valid.
            if (!messages.length) {
                messages.push({
                    role: 'user',
                    content: 'Follow the system instructions above and produce the requested output.'
                });
            }

            const runResult = await runWithTools({
                openai: openai,
                model: prompt.ai_model || "gpt-4o",
                systemText: systemText,
                messages: messages,
                // Provide a synthetic assistant-style object so runWithTools can
                // normalize tools into Responses format.
                assistant: { tools: toolsForModel },
                // Pass through attachments so MCP runs see the same files as
                // non‑MCP prompts (direct or file_search modes).
                attachments_mode: prompt.attachments_mode || 'direct',
                openai_file_ids: Array.isArray(data.openai_file_ids) ? data.openai_file_ids : null,
                req: req
            });
            response = runResult.response;
            if (runResult && Array.isArray(runResult.toolCalls)) {
                // Keep a compact { name, arguments } log of the tools that ran.
                mcpToolCalls = runResult.toolCalls.map(function(tc){
                    return {
                        name: tc && (tc.name || tc.function_name || tc.tool_name),
                        arguments: tc && tc.arguments
                    };
                }).filter(function(x){ return x && x.name; });
            }
        } else {
            const payloadBase = {
                model: prompt.ai_model || "gpt-4o",
                instructions: finalInstructions || instructions, // string only
                input: finalInput || '',
                tools: prompt.tools || [{ "type": "web_search" }],
                tool_choice: prompt.tool_choice || "auto",
                temperature: prompt.temperature ? parseFloat(prompt.temperature) : 0.7,
                //return_token_usage: true
                //max_tokens: prompt.max_tokens ?? 1200
            };
            coloredLog(`${payloadBase.model} and ${payloadBase.temperature}`);
            const mode = (prompt.attachments_mode || 'direct');
            let payload = payloadBase;
            if (Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
                try {
                    payload = await attachFilesToResponsesPayload(openai, payloadBase, {
                        attachments_mode: mode,
                        openai_file_ids: data.openai_file_ids
                    });
                } catch(e) {
                    console.warn('[chatgpt] attachFilesToResponsesPayload failed; continuing without attachments', e && e.message || e);
                }
            }
            response = await safeResponsesCreate(openai, payload);
        }


        // const payload = createResponsePayload(prompt, params, instructions, data.user_prompt);

        // const response = await openai.chat.completions.create(payload);

        const saved = await saveAiResponseRefactor({
            prompt,
            ai_response_content: response.output_text || "",
            user_prompt: finalInput || '',
            params,
            referenced_object_ids: referencedObjectIds,
            response_id: response.id,
            usage: response.usage || {},
            user: req && req.User,
            ai_assistant_id: data.ai_assistant_id,
            mcp_enabled: mcpEnabled,
            mcp_toolset: mcpToolset,
            mcp_selected_tools: resolvedToolNames || (Array.isArray(mcpSelected) ? mcpSelected : []),
            mcp_instructions_mode: mcpInstructionsMode,
            mcp_tools_used: mcpToolCalls
        });
        try {
            // Best-effort: stamp the saved record with the file ids that were used.
            if (saved && Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
                saved.used_openai_file_ids = data.openai_file_ids.slice(0,10);
                await new Promise(function(resolve){
                    JOE.Storage.save(saved,'ai_response',function(){ resolve(); },{ user: req && req.User, history:false });
                });
            }
        } catch(_e){}

        return { success: true, ai_response_id: saved._id, response: response.output_text || "", usage: response.usage };
    } catch (e) {
        console.error('❌ executeJOEAiPrompt error:', e);
        return { error: "Failed to execute AI prompt.", message: e.message };
    }
};
|
|
1312
|
+
|
|
1313
|
+
/**
 * Legacy chat.completions payload builder (the callers above are
 * commented out; the Responses API path is used instead).
 *
 * @param {object} prompt - ai_prompt record; model/tools/temperature overrides
 * @param {object} params - unused here, kept for call-site compatibility
 * @param {string} instructions - system message content
 * @param {string} user_prompt - user message content (empty string when falsy)
 * @returns {object} chat.completions request payload
 */
function createResponsePayload(prompt, params, instructions, user_prompt) {
    // Conversation seed: system instructions followed by the user's turn.
    const messages = [
        { role: "system", content: instructions },
        { role: "user", content: user_prompt || "" }
    ];
    return {
        model: prompt.model || "gpt-4o",
        messages,
        tools: prompt.tools || undefined,
        tool_choice: prompt.tool_choice || "auto",
        // ?? (not ||) so an explicit temperature of 0 is honored.
        temperature: prompt.temperature ?? 0.7,
        max_tokens: prompt.max_tokens ?? 1200
    };
}
|
|
1326
|
+
/**
 * Persist an `ai_response` record for a completed prompt run.
 *
 * Builds the response document (including best-effort JSON parsing of the
 * model output and optional MCP metadata), saves it through JOE.Storage,
 * and returns the in-memory record.
 *
 * @param {object} args
 * @param {object} args.prompt - the ai_prompt record that was executed
 * @param {string} args.ai_response_content - raw model output text
 * @param {string} args.user_prompt - final input text sent to the model
 * @param {object} args.params - parameters used for the run
 * @param {string[]} args.referenced_object_ids - ids of objects touched during the run
 * @param {string} args.response_id - OpenAI response id (stored as '' when absent)
 * @param {object} args.usage - token usage from the API response
 * @param {object} args.user - requesting user (used for creator attribution)
 * @param {string} args.ai_assistant_id - when set, attributes creation to the assistant
 * @param {boolean} args.mcp_enabled - MCP metadata is only attached when true
 * @returns {Promise<object>} the saved ai_response document (rejects if storage save fails)
 */
async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids,response_id,usage,user,ai_assistant_id, mcp_enabled, mcp_toolset, mcp_selected_tools, mcp_instructions_mode, mcp_tools_used }) {
    // Top-level keys of the response when it is itself a JSON object.
    // NOTE(review): this logs an error for every plain-text (non-JSON)
    // response body — presumably intentional, but noisy; confirm.
    var response_keys = [];
    try {
        response_keys = Object.keys(JSON.parse(ai_response_content));
    }catch (e) {
        console.error('❌ Error parsing AI response content for keys:', e);
    }
    // Best-effort parse into JSON for downstream agents (Thought pipeline, etc.)
    let parsedResponse = null;
    try {
        const jt = extractJsonText(ai_response_content);
        if (jt) {
            parsedResponse = JSON.parse(jt);
        }
    } catch(_e) {
        parsedResponse = null;
    }
    // Creator attribution: an explicit assistant id wins over the user.
    var creator_type = null;
    var creator_id = null;
    try{
        if (ai_assistant_id){
            creator_type = 'ai_assistant';
            creator_id = ai_assistant_id;
        } else if (user && user._id){
            creator_type = 'user';
            creator_id = user._id;
        }
    }catch(_e){}
    const aiResponse = {
        name: `${prompt.name}`,
        itemtype: 'ai_response',
        ai_prompt: prompt._id,
        prompt_name: prompt.name,
        prompt_method:prompt.prompt_method,
        response: ai_response_content,
        response_json: parsedResponse,
        response_keys: response_keys,
        response_id:response_id||'',
        user_prompt: user_prompt,
        params_used: params,
        usage: usage || {},
        tags: prompt.tags || [],
        model_used: prompt.ai_model || "gpt-4o",
        referenced_objects: referenced_object_ids, // new flexible array of referenced object ids
        created: (new Date).toISOString(),
        _id: cuid(),
        creator_type: creator_type,
        creator_id: creator_id
    };
    // Only attach MCP metadata when MCP was actually enabled for this run, to
    // avoid introducing nulls into history diffs.
    try{
        if (mcp_enabled) {
            aiResponse.mcp_enabled = true;
            if (mcp_toolset) { aiResponse.mcp_toolset = mcp_toolset; }
            if (Array.isArray(mcp_selected_tools) && mcp_selected_tools.length) {
                aiResponse.mcp_selected_tools = mcp_selected_tools;
            }
            if (mcp_instructions_mode) {
                aiResponse.mcp_instructions_mode = mcp_instructions_mode;
            }
            if (Array.isArray(mcp_tools_used) && mcp_tools_used.length) {
                aiResponse.mcp_tools_used = mcp_tools_used;
            }
        }
    }catch(_e){}

    // Persist via the callback-style storage layer; save failures reject
    // and propagate to the caller.
    await new Promise((resolve, reject) => {
        JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
            if (err) {
                console.error('❌ Error saving AI response:', err);
                reject(err);
            } else {
                console.log('✅ AI response saved successfully');
                resolve(result);
            }
        });
    });

    return aiResponse;
}
|
|
1407
|
+
|
|
1408
|
+
// ---------- Widget chat endpoints (Responses API + optional assistants) ----------
|
|
1409
|
+
/**
 * Normalize stored widget messages into the canonical
 * { role, content, created_at } shape (plus optional `meta`).
 *
 * @param {Array|*} messages - raw messages from an ai_widget_conversation;
 *   non-arrays normalize to [].
 * @returns {Array<{role:string, content:string, created_at:string, meta?:string}>}
 */
function normalizeMessages(messages) {
    if (!Array.isArray(messages)) { return []; }
    return messages.map(function (m) {
        const normalized = {
            role: m.role || 'assistant',
            content: m.content || '',
            created_at: m.created_at || m.created || new Date().toISOString()
        };
        // Preserve the meta marker (e.g. "tools_used") that widgetMessage
        // attaches to tool-usage notices; previously it was dropped on
        // reload, making meta messages indistinguishable from normal ones.
        if (m.meta) { normalized.meta = m.meta; }
        return normalized;
    });
}
|
|
1419
|
+
|
|
1420
|
+
/**
|
|
1421
|
+
* widgetStart
|
|
1422
|
+
*
|
|
1423
|
+
* Purpose:
|
|
1424
|
+
* Create and persist a new `ai_widget_conversation` record for the
|
|
1425
|
+
* external `<joe-ai-widget>` chat component. This is a lightweight
|
|
1426
|
+
* conversation record that stores model, assistant, system text and
|
|
1427
|
+
* messages for the widget.
|
|
1428
|
+
*
|
|
1429
|
+
* Inputs (data):
|
|
1430
|
+
* - model (optional) override model for the widget
|
|
1431
|
+
* - ai_assistant_id (optional) JOE ai_assistant cuid
|
|
1432
|
+
* - system (optional) explicit system text
|
|
1433
|
+
* - source (optional) freeform source tag, defaults to "widget"
|
|
1434
|
+
*
|
|
1435
|
+
* OpenAI calls:
|
|
1436
|
+
* - None. This endpoint only touches storage.
|
|
1437
|
+
*
|
|
1438
|
+
* Output:
|
|
1439
|
+
* - { success, conversation_id, model, assistant_id }
|
|
1440
|
+
* where assistant_id is the OpenAI assistant_id (if present).
|
|
1441
|
+
*/
|
|
1442
|
+
this.widgetStart = async function (data, req, res) {
    try {
        var body = data || {};
        // Default to a modern chat model when no assistant/model is provided.
        // If an assistant is supplied, its ai_model will override this.
        var model = body.model || "gpt-5.1";
        // FIX: await the assistant lookup for consistency with
        // executeJOEAiPrompt (`await $J.get(promptId)`). Without the await,
        // a promise-returning $J.get would leave `assistant.instructions`,
        // `assistant._id`, `assistant.ai_model`, etc. undefined below;
        // awaiting a plain value is a harmless no-op.
        var assistant = body.ai_assistant_id ? await $J.get(body.ai_assistant_id) : null;
        var system = body.system || (assistant && assistant.instructions) || "";
        // Prefer explicit user fields coming from the client (ai-widget-test page
        // passes _joe.User fields). Widget endpoints no longer infer from req.User
        // to keep a single, explicit source of truth.
        var user = null;
        if (body.user_id || body.user_name || body.user_color) {
            user = {
                _id: body.user_id,
                name: body.user_name,
                fullname: body.user_name,
                color: body.user_color
            };
        }
        var user_color = (body.user_color) || (user && user.color) || null;

        // New conversation record; messages start empty and are appended
        // by widgetMessage on each turn.
        var convo = {
            _id: (typeof cuid === 'function') ? cuid() : undefined,
            itemtype: "ai_widget_conversation",
            model: (assistant && assistant.ai_model) || model,
            assistant: assistant && assistant._id,
            assistant_id: assistant && assistant.assistant_id,
            assistant_color: assistant && assistant.assistant_color,
            user: user && user._id,
            user_name: user && (user.fullname || user.name),
            user_color: user_color,
            system: system,
            messages: [],
            source: body.source || "widget",
            created: new Date().toISOString(),
            joeUpdated: new Date().toISOString()
        };

        const saved = await new Promise(function (resolve, reject) {
            // Widget conversations are lightweight and do not need full history diffs.
            JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
                if (err) return reject(err);
                resolve(result);
            }, { history: false });
        });

        return {
            success: true,
            conversation_id: saved._id,
            model: saved.model,
            assistant_id: saved.assistant_id || null,
            assistant_color: saved.assistant_color || null,
            user_color: saved.user_color || user_color || null
        };
    } catch (e) {
        console.error("[chatgpt] widgetStart error:", e);
        return { success: false, error: e && e.message || "Unknown error" };
    }
};
|
|
1502
|
+
|
|
1503
|
+
/**
|
|
1504
|
+
* widgetHistory
|
|
1505
|
+
*
|
|
1506
|
+
* Purpose:
|
|
1507
|
+
* Load an existing `ai_widget_conversation` and normalize its
|
|
1508
|
+
* messages for use by `<joe-ai-widget>` on page load or refresh.
|
|
1509
|
+
*
|
|
1510
|
+
* Inputs (data):
|
|
1511
|
+
* - conversation_id or _id: the widget conversation cuid
|
|
1512
|
+
*
|
|
1513
|
+
* OpenAI calls:
|
|
1514
|
+
* - None. Purely storage + normalization.
|
|
1515
|
+
*
|
|
1516
|
+
* Output:
|
|
1517
|
+
* - { success, conversation_id, model, assistant_id, messages }
|
|
1518
|
+
*/
|
|
1519
|
+
this.widgetHistory = async function (data, req, res) {
    try {
        const convoId = data.conversation_id || data._id;
        if (!convoId) {
            return { success: false, error: "Missing conversation_id" };
        }
        // Promisify the callback-style storage load and take the first match.
        const record = await new Promise((resolve, reject) => {
            JOE.Storage.load("ai_widget_conversation", { _id: convoId }, (err, results) => {
                if (err) { return reject(err); }
                resolve(results && results[0]);
            });
        });
        if (!record) {
            return { success: false, error: "Conversation not found" };
        }

        // Normalize stored messages into the widget's canonical shape.
        record.messages = normalizeMessages(record.messages);
        return {
            success: true,
            conversation_id: record._id,
            model: record.model,
            assistant_id: record.assistant_id || null,
            assistant_color: record.assistant_color || null,
            user_color: record.user_color || null,
            messages: record.messages
        };
    } catch (e) {
        console.error("[chatgpt] widgetHistory error:", e);
        return { success: false, error: e && e.message || "Unknown error" };
    }
};
|
|
1550
|
+
|
|
1551
|
+
/**
|
|
1552
|
+
* widgetMessage
|
|
1553
|
+
*
|
|
1554
|
+
* Purpose:
|
|
1555
|
+
* Handle a single user turn for `<joe-ai-widget>`:
|
|
1556
|
+
* - Append the user message to the stored conversation.
|
|
1557
|
+
* - Call OpenAI Responses (optionally with tools from the selected
|
|
1558
|
+
* `ai_assistant`, via runWithTools + MCP).
|
|
1559
|
+
* - Append the assistant reply, persist the conversation, and return
|
|
1560
|
+
* the full message history plus the latest assistant message.
|
|
1561
|
+
*
|
|
1562
|
+
* Inputs (data):
|
|
1563
|
+
* - conversation_id or _id: cuid of the widget conversation
|
|
1564
|
+
* - content: user text
|
|
1565
|
+
* - role: user role, defaults to "user"
|
|
1566
|
+
* - assistant_id: optional OpenAI assistant_id (used only to
|
|
1567
|
+
* locate the JOE ai_assistant config)
|
|
1568
|
+
* - model: optional model override
|
|
1569
|
+
*
|
|
1570
|
+
* OpenAI calls:
|
|
1571
|
+
* - responses.create (once if no tools; twice when tools are present):
|
|
1572
|
+
* * First call may include tools (assistant.tools) and `tool_choice:"auto"`.
|
|
1573
|
+
* * Any tool calls are executed via MCP and injected as `tool` messages.
|
|
1574
|
+
* * Second call is plain Responses with updated messages.
|
|
1575
|
+
*
|
|
1576
|
+
* Output:
|
|
1577
|
+
* - { success, conversation_id, model, assistant_id, messages,
|
|
1578
|
+
* last_message, usage }
|
|
1579
|
+
*/
|
|
1580
|
+
this.widgetMessage = async function (data, req, res) {
    try {
        var body = data || {};
        var conversation_id = body.conversation_id || body._id;
        var content = body.content;
        var role = body.role || "user";

        if (!conversation_id || !content) {
            return { success: false, error: "Missing conversation_id or content" };
        }

        // Load the conversation record (first match by _id).
        const convo = await new Promise(function (resolve, reject) {
            JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
                if (err) return reject(err);
                resolve(results && results[0]);
            });
        });
        if (!convo) {
            return { success: false, error: "Conversation not found" };
        }

        convo.messages = normalizeMessages(convo.messages);
        const nowIso = new Date().toISOString();

        // Append user message
        const userMsg = { role: role, content: content, created_at: nowIso };
        convo.messages.push(userMsg);

        // Backfill user metadata (id/name/color) on older conversations that
        // were created before we started storing these fields. Prefer explicit
        // body fields only; we no longer infer from req.User so that widget
        // calls always have a single, explicit user source.
        var u = null;
        if (body.user_id || body.user_name || body.user_color) {
            u = {
                _id: body.user_id,
                name: body.user_name,
                fullname: body.user_name,
                color: body.user_color
            };
        }
        if (u) {
            // Only fill fields the conversation does not already have.
            if (!convo.user && u._id) {
                convo.user = u._id;
            }
            if (!convo.user_name && (u.fullname || u.name)) {
                convo.user_name = u.fullname || u.name;
            }
            if (!convo.user_color && u.color) {
                convo.user_color = u.color;
            }
        }

        const assistantId = body.assistant_id || convo.assistant_id || null;
        // NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
        // We do NOT pass assistant_id to the Responses API (it is not supported in the
        // version we are using); instead we look up the JOE ai_assistant by assistant_id
        // and inject its configuration (model, instructions, tools) into the request.
        var assistantObj = null;
        if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
            assistantObj = JOE.Data.ai_assistant.find(function (a) {
                return a && a.assistant_id === assistantId;
            }) || null;
        }
        const openai = newClient();
        // Model precedence: assistant config > stored conversation > request body > default.
        const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";

        // Prefer explicit system text on the conversation, then assistant instructions.
        const systemText = (convo.system && String(convo.system)) ||
            (assistantObj && assistantObj.instructions) ||
            "";
        // Strip widget-only fields (created_at, meta) before sending to the model.
        const messagesForModel = convo.messages.map(function (m) {
            return { role: m.role, content: m.content };
        });

        // Use runWithTools so that, when an assistant has tools configured,
        // we let the model call those tools via MCP before generating a
        // final response.
        const runResult = await runWithTools({
            openai: openai,
            model: model,
            systemText: systemText,
            messages: messagesForModel,
            assistant: assistantObj,
            req: req
        });

        // If tools were called this turn, inject a small meta message so the
        // widget clearly shows which functions ran before the assistant reply.
        if (runResult.toolCalls && runResult.toolCalls.length) {
            const names = runResult.toolCalls.map(function (tc) { return tc && tc.name; })
                .filter(Boolean)
                .join(', ');
            convo.messages.push({
                role: "assistant",
                meta: "tools_used",
                content: "[Tools used this turn: " + names + "]",
                created_at: nowIso
            });
        }

        // Append the assistant reply and stamp the conversation's update times.
        const assistantText = runResult.finalText || "";
        const assistantMsg = {
            role: "assistant",
            content: assistantText,
            created_at: new Date().toISOString()
        };
        convo.messages.push(assistantMsg);
        convo.last_message_at = assistantMsg.created_at;
        convo.joeUpdated = assistantMsg.created_at;

        await new Promise(function (resolve, reject) {
            // Skip history for widget conversations to avoid heavy diffs / craydent.equals issues.
            JOE.Storage.save(convo, "ai_widget_conversation", function (err, saved) {
                if (err) return reject(err);
                resolve(saved);
            }, { history: false });
        });

        return {
            success: true,
            conversation_id: convo._id,
            model: model,
            assistant_id: assistantId,
            assistant_color: (assistantObj && assistantObj.assistant_color) || convo.assistant_color || null,
            user_color: convo.user_color || ((u && u.color) || null),
            messages: convo.messages,
            last_message: assistantMsg,
            // Usage comes from the underlying Responses call inside runWithTools.
            usage: (runResult.response && runResult.response.usage) || {}
        };
    } catch (e) {
        console.error("[chatgpt] widgetMessage error:", e);
        return { success: false, error: e && e.message || "Unknown error" };
    }
};
|
|
1716
|
+
|
|
1717
|
+
// Mark async plugin methods so Server.pluginHandling will await them.
|
|
1718
|
+
// Mark async plugin methods so Server.pluginHandling will await them.
this.async = {
    executeJOEAiPrompt: this.executeJOEAiPrompt,
    testPrompt: this.testPrompt,
    sendInitialConsultTranscript: this.sendInitialConsultTranscript,
    widgetStart: this.widgetStart,
    widgetHistory: this.widgetHistory,
    widgetMessage: this.widgetMessage,
    autofill: this.autofill,
    filesRetryFromUrl: this.filesRetryFromUrl
};
// Endpoints that require authentication.
// FIX: removed the stray leading comma — `[,'testPrompt']` created a
// sparse array with an empty slot before 'testPrompt'.
this.protected = ['testPrompt'];
|
|
1729
|
+
return self;
|
|
1730
|
+
}
|
|
1731
|
+
|
|
1732
|
+
module.exports = new ChatGPT();
|