json-object-editor 0.10.653 → 0.10.657

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,1339 +1,1732 @@
1
- const OpenAI = require("openai");
2
- const { google } = require('googleapis');
3
- const path = require('path');
4
- const MCP = require("../modules/MCP.js");
5
- // const { name } = require("json-object-editor/server/webconfig");
6
-
7
- function ChatGPT() {
8
- // const fetch = (await import('node-fetch')).default;
9
- //const openai = new OpenAI();
10
- // Load the service account key JSON file
11
- const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
12
- const google_auth = new google.auth.GoogleAuth({
13
- keyFile: serviceAccountKeyFile,
14
- scopes: ['https://www.googleapis.com/auth/documents.readonly'],
15
- });
16
-
17
- var self = this;
18
- this.async ={};
19
- function coloredLog(message){
20
- console.log(JOE.Utils.color('[chatgpt]', 'plugin', false), message);
21
- }
22
- //xx -setup and send a test prompt to chatgpt
23
- //xx get the api key from joe settings
24
-
25
- //get a prompt from id
26
- //send the prompt to chatgpt
27
-
28
- //++get the cotnent of a file
29
- //++send the content of a file to chatgpt
30
-
31
- //++ structure data
32
- //++ save the response to an ai_repsonse
33
- //create an ai_response
34
- //store the content
35
- //attach to the request
36
- //store ids sent with the request
37
- this.default = function(data, req, res) {
38
- try {
39
- var payload = {
40
- params: req.params,
41
- data: data
42
- };
43
- } catch (e) {
44
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
45
- }
46
- return payload;
47
- };
48
- function getAPIKey() {
49
- const setting = JOE.Utils.Settings('OPENAI_API_KEY');
50
- if (!setting) throw new Error("Missing OPENAI_API_KEY setting");
51
- return setting;
52
- }
53
- function getSchemaDef(name) {
54
- if (!name) return { full: null, summary: null };
55
- const full = JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[name];
56
- const summary = JOE.Schemas && JOE.Schemas.summary && JOE.Schemas.summary[name];
57
- return { full, summary };
58
- }
59
- /**
60
- * callMCPTool
61
- *
62
- * Small, well‑scoped helper to invoke a JOE MCP tool directly in‑process,
63
- * without going over HTTP or worrying about POST size limits.
64
- *
65
- * Usage:
66
- * const result = await callMCPTool('listSchemas', {}, { req });
67
- *
68
- * Notes:
69
- * - `toolName` must exist on MCP.tools.
70
- * - `params` should be a plain JSON-serializable object.
71
- * - `ctx` is optional and can pass `{ req }` or other context that MCP
72
- * tools might want (for auth, user, etc.).
73
- */
74
- async function callMCPTool(toolName, params = {}, ctx = {}) {
75
- if (!MCP || !MCP.tools) {
76
- throw new Error("MCP module not initialized; cannot call MCP tool");
77
- }
78
- if (!toolName || typeof toolName !== 'string') {
79
- throw new Error("Missing or invalid MCP tool name");
80
- }
81
- const fn = MCP.tools[toolName];
82
- if (typeof fn !== 'function') {
83
- throw new Error(`MCP tool "${toolName}" not found`);
84
- }
85
- try {
86
- // All MCP tools accept (params, ctx) and return a JSON-serializable result.
87
- // The Responses / tools API often returns arguments as a JSON string, so
88
- // normalize that here before invoking the tool.
89
- let toolParams = params;
90
- if (typeof toolParams === 'string') {
91
- try {
92
- toolParams = JSON.parse(toolParams);
93
- } catch (parseErr) {
94
- console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, toolParams);
95
- // Fall back to passing the raw string so tools that expect it still work.
96
- }
97
- }
98
- const result = await fn(toolParams || {}, ctx || {});
99
- return result;
100
- } catch (e) {
101
- // Surface a clean error upstream but keep details in logs.
102
- console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
103
- throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
104
- }
105
- }
106
-
107
- /**
108
- * extractToolCalls
109
- *
110
- * Best-effort parser for tool calls from a Responses API result.
111
- * The Responses output shape may evolve; this function looks for
112
- * any "tool_call" typed content in response.output[*].content[*]
113
- * and normalizes it into `{ name, arguments }` objects.
114
- */
115
- function extractToolCalls(response) {
116
- var calls = [];
117
- if (!response || !Array.isArray(response.output)) { return calls; }
118
-
119
- response.output.forEach(function (item) {
120
- if (!item) { return; }
121
- // v1-style: item.type === 'tool_call'
122
- if (item.type === 'function_call') {
123
- calls.push({
124
- name: item.name || item.function_name,
125
- arguments: item.arguments || item.function_arguments || {}
126
- });
127
- }
128
- // message-style: item.content is an array of parts
129
- if (Array.isArray(item.content)) {
130
- item.content.forEach(function (part) {
131
- if (!part) { return; }
132
- if (part.type === 'function_call') {
133
- calls.push({
134
- name: part.name || part.tool_name,
135
- arguments: part.arguments || part.args || {}
136
- });
137
- }
138
- });
139
- }
140
- });
141
-
142
- return calls;
143
- }
144
-
145
- // Detect "request too large / token limit" style errors from the Responses API.
146
- function isTokenLimitError(err) {
147
- if (!err || typeof err !== 'object') return false;
148
- if (err.status !== 429 && err.status !== 400) return false;
149
- const msg = (err.error && err.error.message) || err.message || '';
150
- if (!msg) return false;
151
- const lower = String(msg).toLowerCase();
152
- // Cover common phrasing from OpenAI for context/TPM limits.
153
- return (
154
- lower.includes('request too large') ||
155
- lower.includes('too many tokens') ||
156
- lower.includes('max tokens') ||
157
- lower.includes('maximum context length') ||
158
- lower.includes('tokens per min')
159
- );
160
- }
161
-
162
- // Create a compact representation of a JOE object for use in slim payloads.
163
- function slimJOEObject(item) {
164
- if (!item || typeof item !== 'object') return item;
165
- const name = item.name || item.title || item.label || item.email || item.slug || item._id || '';
166
- const info = item.info || item.description || item.summary || '';
167
- return {
168
- _id: item._id,
169
- itemtype: item.itemtype,
170
- name: name,
171
- info: info
172
- };
173
- }
174
-
175
- // Given an `understandObject` result, produce a slimmed version:
176
- // - keep `object` as-is
177
- // - keep `flattened` for the main object (depth-limited) if present
178
- // - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
179
- // - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
180
- function slimUnderstandObjectResult(result) {
181
- if (!result || typeof result !== 'object') return result;
182
- const out = {
183
- _id: result._id,
184
- itemtype: result.itemtype,
185
- object: result.object,
186
- // retain main flattened view if available; this is typically much smaller
187
- flattened: result.flattened || null,
188
- schemas: result.schemas || {},
189
- tags: result.tags || {},
190
- statuses: result.statuses || {},
191
- slim: true
192
- };
193
- if (Array.isArray(result.related)) {
194
- out.related = result.related.map(function (rel) {
195
- if (!rel) return rel;
196
- const base = rel.object || {};
197
- const slim = slimJOEObject(base);
198
- return {
199
- field: rel.field,
200
- _id: slim && slim._id || rel._id,
201
- itemtype: slim && slim.itemtype || rel.itemtype,
202
- object: slim
203
- };
204
- });
205
- } else {
206
- out.related = [];
207
- }
208
- return out;
209
- }
210
-
211
- // Walk the messages array and, for any system message containing a JSON payload
212
- // of the form { "tool": "understandObject", "result": {...} }, replace the
213
- // result with a slimmed version to reduce token count. Returns a new array; if
214
- // nothing was changed, returns the original array.
215
- function shrinkUnderstandObjectMessagesForTokens(messages) {
216
- if (!Array.isArray(messages)) return messages;
217
- let changed = false;
218
- const shrunk = messages.map(function (msg) {
219
- if (!msg || msg.role !== 'system') return msg;
220
- if (typeof msg.content !== 'string') return msg;
221
- try {
222
- const parsed = JSON.parse(msg.content);
223
- if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
224
- return msg;
225
- }
226
- const slimmed = slimUnderstandObjectResult(parsed.result);
227
- changed = true;
228
- return {
229
- ...msg,
230
- content: JSON.stringify({ tool: 'understandObject', result: slimmed })
231
- };
232
- } catch (_e) {
233
- return msg;
234
- }
235
- });
236
- return changed ? shrunk : messages;
237
- }
238
-
239
- /**
240
- * runWithTools
241
- *
242
- * Single orchestration function for calling the OpenAI Responses API
243
- * with optional tools (sourced from a JOE `ai_assistant`), handling
244
- * tool calls via MCP, and issuing a follow-up model call with the
245
- * tool results injected.
246
- *
247
- * Inputs (opts):
248
- * - openai: OpenAI client instance
249
- * - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
250
- * - systemText: string of system / instructions text
251
- * - messages: array of { role, content } for the conversation so far
252
- * - assistant: JOE `ai_assistant` object (may contain `tools`)
253
- * - req: Express request (passed into MCP tools as context)
254
- *
255
- * Returns:
256
- * - { response, finalText, messages, toolCalls }
257
- * where `finalText` is the assistant-facing text (from output_text)
258
- * and `messages` is the possibly-extended message list including
259
- * any synthetic `tool` messages.
260
- */
261
- async function runWithTools(opts) {
262
- const openai = opts.openai;
263
- const model = opts.model;
264
- const systemText = opts.systemText || "";
265
- const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
266
- const assistant = opts.assistant || null;
267
- const req = opts.req;
268
-
269
- // Normalize tools: in many schemas tools may be stored as a JSON string;
270
- // here we accept either an array or a JSON-stringified array.
271
- let tools = null;
272
- if (assistant && assistant.tools) {
273
- if (Array.isArray(assistant.tools)) {
274
- tools = assistant.tools;
275
- } else if (typeof assistant.tools === 'string') {
276
- try {
277
- const parsed = JSON.parse(assistant.tools);
278
- if (Array.isArray(parsed)) {
279
- tools = parsed;
280
- }
281
- } catch (e) {
282
- console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
283
- }
284
- }
285
- }
286
- // Normalize tool definitions for the Responses API. The assistant UI
287
- // uses the Assistants-style shape ({ type:'function', function:{...} }),
288
- // but Responses expects the name/description/parameters at the top level:
289
- // { type:'function', name:'x', description:'...', parameters:{...} }
290
- if (Array.isArray(tools)) {
291
- tools = tools.map(function (t) {
292
- if (t && t.type === 'function' && t.function && !t.name) {
293
- const fn = t.function || {};
294
- return {
295
- type: 'function',
296
- name: fn.name,
297
- description: fn.description,
298
- parameters: fn.parameters || {}
299
- };
300
- }
301
- return t;
302
- });
303
- }
304
-
305
- // No tools configured – do a simple single Responses call.
306
- if (!tools) {
307
- const resp = await openai.responses.create({
308
- model: model,
309
- instructions: systemText,
310
- input: messages
311
- });
312
- return {
313
- response: resp,
314
- finalText: resp.output_text || "",
315
- messages: messages,
316
- toolCalls: []
317
- };
318
- }
319
-
320
- // Step 1: call the model with tools enabled.
321
- const first = await openai.responses.create({
322
- model: model,
323
- instructions: systemText,
324
- input: messages,
325
- tools: tools,
326
- tool_choice: "auto"
327
- });
328
-
329
- const toolCalls = extractToolCalls(first);
330
-
331
- // If the model didn't decide to use tools, just return the first answer.
332
- if (!toolCalls.length) {
333
- return {
334
- response: first,
335
- finalText: first.output_text || "",
336
- messages: messages,
337
- toolCalls: []
338
- };
339
- }
340
-
341
- // Step 2: execute each tool call via MCP and append tool results.
342
- for (let i = 0; i < toolCalls.length; i++) {
343
- const tc = toolCalls[i];
344
- try {
345
- const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
346
- messages.push({
347
- // Responses API does not support a "tool" role in messages.
348
- // We inject tool outputs as a synthetic system message so
349
- // the model can see the results without affecting the
350
- // user/assistant turn structure.
351
- role: "system",
352
- content: JSON.stringify({ tool: tc.name, result: result })
353
- });
354
- } catch (e) {
355
- console.error("[chatgpt] MCP tool error in runWithTools:", e);
356
- messages.push({
357
- role: "system",
358
- content: JSON.stringify({
359
- tool: tc.name,
360
- error: e && e.message || "Tool execution failed"
361
- })
362
- });
363
- }
364
- }
365
-
366
- // Step 3: ask the model again with tool outputs included.
367
- let finalMessages = messages;
368
- let second;
369
- try {
370
- second = await openai.responses.create({
371
- model: model,
372
- instructions: systemText,
373
- input: finalMessages
374
- });
375
- } catch (e) {
376
- if (isTokenLimitError(e)) {
377
- console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
378
- const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
379
- // If nothing was shrunk, just rethrow the original error.
380
- if (shrunk === finalMessages) {
381
- throw e;
382
- }
383
- finalMessages = shrunk;
384
- // Retry once with the smaller payload; let any error bubble up.
385
- second = await openai.responses.create({
386
- model: model,
387
- instructions: systemText,
388
- input: finalMessages
389
- });
390
- } else {
391
- throw e;
392
- }
393
- }
394
-
395
- return {
396
- response: second,
397
- finalText: second.output_text || "",
398
- messages: finalMessages,
399
- toolCalls: toolCalls
400
- };
401
- }
402
-
403
- // function newClient(){
404
- // var key = getAPIKey();
405
- // var c = new OpenAI({
406
- // apiKey: key, // This is the default and can be omitted
407
- // });
408
- // if(!c || !c.apiKey){
409
- // return { errors: 'No API key provided' };
410
- // }
411
- // return c;
412
- // }
413
- function newClient() {
414
- return new OpenAI({ apiKey: getAPIKey() });
415
- }
416
- this.testPrompt= async function(data, req, res) {
417
- try {
418
- var payload = {
419
- params: req.params,
420
- data: data
421
- };
422
- } catch (e) {
423
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
424
- }
425
- const client = newClient();
426
- if(client.errors){
427
- return { errors: client.errors };
428
- }
429
- try {
430
- const chatCompletion = await client.chat.completions.create({
431
- messages: [{ role: 'user', content: 'Tell me a story about JOE: the json object editor in under 256 chars.' }],
432
- model: 'gpt-4o',
433
- });
434
- coloredLog(chatCompletion);
435
- const text = chatCompletion.choices && chatCompletion.choices[0] && chatCompletion.choices[0].message && chatCompletion.choices[0].message.content || '';
436
- // Optionally persist as ai_response with parsed JSON when applicable
437
- const parsed = (function(){
438
- try {
439
- const jt = extractJsonText(text);
440
- return jt ? JSON.parse(jt) : null;
441
- } catch(_e){ return null; }
442
- })();
443
- try {
444
- var creator_type = null;
445
- var creator_id = null;
446
- try{
447
- var u = req && req.User;
448
- if (u && u._id){
449
- creator_type = 'user';
450
- creator_id = u._id;
451
- }
452
- }catch(_e){}
453
- const aiResponse = {
454
- itemtype: 'ai_response',
455
- name: 'Test Prompt ChatGPT',
456
- response_type: 'testPrompt',
457
- response: text,
458
- response_json: parsed,
459
- response_id: chatCompletion.id || '',
460
- user_prompt: payload && payload.data && payload.data.prompt || 'Tell me a story about JOE: the json object editor in under 256 chars.',
461
- model_used: 'gpt-4o',
462
- created: (new Date()).toISOString(),
463
- creator_type: creator_type,
464
- creator_id: creator_id
465
- };
466
- JOE.Storage.save(aiResponse, 'ai_response', function(){}, { history: false, user: (req && req.User) || { name:'system' } });
467
- } catch(_e){ /* best-effort only */ }
468
- return {payload,chatCompletion,content:text};
469
- } catch (error) {
470
- if (error.status === 429) {
471
- return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
472
- } else {
473
- return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
474
- }
475
- }
476
- }
477
-
478
- this.sendInitialConsultTranscript= async function(data, req, res) {
479
- coloredLog("sendInitialConsultTranscript");
480
- //get the prompt object from the prompt id
481
- //get the business object from the refrenced object id
482
- //see if there is a initial_transcript_url property on that object
483
- //if there is, get the content of the file
484
- //send the content to chatgpt, with the template property of the prompt object
485
- //get the response
486
- try {
487
- var payload = {
488
- params: req.params,
489
- data: data
490
- };
491
- } catch (e) {
492
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
493
- }
494
- var businessOBJ = JOE.Data.business.find(b=>b._id == data.business);
495
- var promptOBJ = JOE.Data.ai_prompt.find(p=>p._id == data.ai_prompt);
496
-
497
-
498
- // See if there is an initial_transcript_url property on that object
499
- const transcriptUrl = businessOBJ.initial_transcript_url;
500
- if (!transcriptUrl) {
501
- return res.jsonp({ error: 'No initial transcript URL found' });
502
- }
503
-
504
- //Get the content of the file from Google Docs
505
- const transcriptContent = await getGoogleDocContent(transcriptUrl);
506
- if (!transcriptContent || transcriptContent.error) {
507
- return res.jsonp({ error: (transcriptContent.error && transcriptContent.error.message)||'Failed to fetch transcript content' });
508
- }
509
- const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
510
- payload.tokenCount = tokenCount;
511
- coloredLog("token count: "+tokenCount);
512
- //return res.jsonp({tokens:tokenCount,content:transcriptContent});
513
- // Send the content to ChatGPT, with the template property of the prompt object
514
- const client = new OpenAI({
515
- apiKey: getAPIKey(), // This is the default and can be omitted
516
- });
517
-
518
- const chatResponse = await client.chat.completions.create({
519
- messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
520
- model: 'gpt-4o',
521
- });
522
-
523
- // Get the response
524
- const chatContent = chatResponse.choices[0].message.content;
525
- const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
526
- // Save the response
527
- await saveAIResponse({
528
- name:responseName,
529
- business: data.business,
530
- ai_prompt: data.ai_prompt,
531
- response: chatContent,
532
- payload,
533
- prompt_method:req.params.method
534
- }, req && req.User);
535
- coloredLog("response saved -"+responseName);
536
- return {payload,
537
- businessOBJ,
538
- promptOBJ,
539
- chatContent,
540
- responseName
541
- };
542
-
543
- }
544
-
545
- async function getGoogleDocContent(docUrl) {
546
- try {
547
- const auth = new google.auth.GoogleAuth({
548
- scopes: ['https://www.googleapis.com/auth/documents.readonly']
549
- });
550
- //get google docs apikey from settings
551
- const GOOGLE_API_KEY = JOE.Utils.Settings('GOOGLE_DOCS_API_KEY');
552
- const docs = google.docs({ version: 'v1', auth:google_auth });
553
- const docId = extractDocIdFromUrl(docUrl);
554
- const doc = await docs.documents.get({ documentId: docId });
555
-
556
- let content = doc.data.body.content.map(element => {
557
- if (element.paragraph && element.paragraph.elements) {
558
- return element.paragraph.elements.map(
559
- e => e.textRun ? e.textRun.content.replace(/Euron Nicholson/g, '[EN]').replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
560
- : ''
561
- ).join('');
562
- }
563
- return '';
564
- }).join('\n');
565
-
566
- // Remove timestamps and line numbers
567
- //content = content.replace(/^\d+\n\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}\n/gm, '');
568
-
569
- return content;
570
- } catch (error) {
571
- console.error('Error fetching Google Doc content:', error);
572
- return {error};
573
- }
574
- }
575
- function countTokens(text, model = 'gpt-4o') {
576
- const enc = encoding_for_model(model);
577
- const tokens = enc.encode(text);
578
- return tokens.length;
579
- }
580
- function extractDocIdFromUrl(url) {
581
- const match = url.match(/\/d\/([a-zA-Z0-9-_]+)/);
582
- return match ? match[1] : null;
583
- }
584
-
585
- async function saveAIResponse(data, user) {
586
- try {
587
- var creator_type = null;
588
- var creator_id = null;
589
- try{
590
- if (user && user._id){
591
- creator_type = 'user';
592
- creator_id = user._id;
593
- }
594
- }catch(_e){}
595
- const aiResponse = {
596
- name: data.name,
597
- itemtype: 'ai_response',
598
- business: data.business,
599
- ai_prompt: data.ai_prompt,
600
- response: data.response,
601
- payload: data.payload,
602
- prompt_method:data.prompt_method,
603
- created: (new Date).toISOString(),
604
- _id:cuid(),
605
- creator_type: creator_type,
606
- creator_id: creator_id
607
- // Add any other fields you want to save
608
- };
609
- await new Promise((resolve, reject) => {
610
- JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
611
- if (err) {
612
- coloredLog('Error saving AI response: ' + err);
613
- reject(err);
614
- } else {
615
- coloredLog('AI response saved successfully');
616
- resolve(result);
617
- }
618
- });
619
- });
620
- } catch (error) {
621
- coloredLog('Error in saveAIResponse: ' + error);
622
- }
623
- }
624
-
625
- // Normalize model output that should contain JSON. Models often wrap JSON
626
- // in markdown fences (```json ... ```), and may prepend/append prose. This
627
- // helper strips fences and tries to isolate the first well-formed JSON
628
- // object/array substring so JSON.parse has the best chance of succeeding.
629
- function extractJsonText(raw) {
630
- if (!raw) { return ''; }
631
- let t = String(raw).trim();
632
- // If there is any ```...``` fenced block, prefer its contents.
633
- const fenceIdx = t.indexOf('```json') !== -1 ? t.indexOf('```json') : t.indexOf('```');
634
- if (fenceIdx !== -1) {
635
- let start = fenceIdx;
636
- const firstNewline = t.indexOf('\n', start);
637
- if (firstNewline !== -1) {
638
- t = t.substring(firstNewline + 1);
639
- } else {
640
- t = t.substring(start + 3);
641
- }
642
- const lastFence = t.lastIndexOf('```');
643
- if (lastFence !== -1) {
644
- t = t.substring(0, lastFence);
645
- }
646
- t = t.trim();
647
- }
648
- // If there's extra prose around the JSON, slice from first {/[ to last }/]
649
- if (t[0] !== '{' && t[0] !== '[') {
650
- const firstBrace = t.indexOf('{');
651
- const firstBracket = t.indexOf('[');
652
- let first = -1;
653
- if (firstBrace === -1) { first = firstBracket; }
654
- else if (firstBracket === -1) { first = firstBrace; }
655
- else { first = Math.min(firstBrace, firstBracket); }
656
- const lastBrace = Math.max(t.lastIndexOf('}'), t.lastIndexOf(']'));
657
- if (first !== -1 && lastBrace !== -1 && lastBrace > first) {
658
- t = t.slice(first, lastBrace + 1);
659
- }
660
- }
661
- return t.trim();
662
- }
663
-
664
- // Autofill feature (Responses API; supports assistant_id or model)
665
- this.autofill = async function (data, req, res) {
666
- const startedAt = Date.now();
667
- try {
668
- const body = data || {};
669
- const objectId = body.object_id || body._id;
670
- const object = body.object || $J.get(objectId);
671
- const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
672
- const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
673
- const rawFields = body.fields || body.field;
674
- const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
675
- const userPrompt = body.prompt || '';
676
- const assistantId = body.assistant_id || null;
677
-
678
- if (!object) {
679
- return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
680
- }
681
- if (!schemaName) {
682
- return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
683
- }
684
- if (!fields.length) {
685
- return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
686
- }
687
-
688
- const flattened = JOE.Utils.flattenObject(object._id);
689
- const systemText = [
690
- 'You are JOE (Json Object Editor) assistant.',
691
- 'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
692
- '- Respect field types (text, number, arrays, enums, references).',
693
- '- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
694
- '- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
695
- '- If a field is an array, return an array of values.',
696
- '- Never modify unrelated fields.',
697
- '- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
698
- '- If you lack sufficient information, return an empty patch.'
699
- ].join('\\n');
700
-
701
- const schemaForContext = schemaSummary || schemaFull || {};
702
- const userInput = JSON.stringify({
703
- action: 'autofill_fields',
704
- target_schema: schemaName,
705
- requested_fields: fields,
706
- user_prompt: userPrompt,
707
- object_context: flattened,
708
- schema_context: schemaForContext
709
- }, null, ' ');
710
-
711
- const openai = newClient();
712
- const model = body.model || 'gpt-4o-mini';////'gpt-5-nano';
713
-
714
- // For simplicity and robustness, use plain text output and instruct the
715
- // model to return a strict JSON object. We previously attempted the
716
- // Responses `json_schema` response_format, but the SDK shape can change
717
- // and is harder to parse reliably; text + JSON.parse is sufficient here.
718
- const requestBase = {
719
- temperature: 0.2,
720
- instructions: systemText,
721
- input: userInput
722
- };
723
- // Optional web_search tool: if the caller sets allow_web truthy, expose
724
- // the built-in web_search capability and let the model decide when to
725
- // call it.
726
- if (body.allow_web) {
727
- coloredLog("allowing web search");
728
- requestBase.tools = [{ type: 'web_search' }];
729
- requestBase.tool_choice = 'auto';
730
- }
731
-
732
- let response;
733
- if (assistantId) {
734
- response = await openai.responses.create({ assistant_id: assistantId, ...requestBase });
735
- } else {
736
- response = await openai.responses.create({ model, ...requestBase });
737
- }
738
-
739
- let textOut = '';
740
- try { textOut = response.output_text || ''; } catch (_e) {}
741
- coloredLog("textOut: "+textOut);
742
- if (!textOut && response && Array.isArray(response.output)) {
743
- for (let i = 0; i < response.output.length; i++) {
744
- const item = response.output[i];
745
- if (item && item.type === 'message' && item.content && Array.isArray(item.content)) {
746
- const textPart = item.content.find(function (c) { return c.type === 'output_text' || c.type === 'text'; });
747
- if (textPart && (textPart.text || textPart.output_text)) {
748
- textOut = textPart.text || textPart.output_text;
749
- break;
750
- }
751
- }
752
- }
753
- }
754
-
755
- let patch = {};
756
- try {
757
- const jsonText = extractJsonText(textOut);
758
- const parsed = JSON.parse(jsonText || '{}');
759
- patch = parsed.patch || {};
760
- } catch (_e) {
761
- console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
762
- }
763
- coloredLog("patch: "+JSON.stringify(patch));
764
- const filteredPatch = {};
765
- fields.forEach(function (f) {
766
- if (Object.prototype.hasOwnProperty.call(patch, f)) {
767
- filteredPatch[f] = patch[f];
768
- }
769
- });
770
- // If we got no fields back on the first attempt, retry once before
771
- // giving up. Avoid infinite loops by marking a retry flag.
772
- if (!Object.keys(filteredPatch).length && !body._retry) {
773
- coloredLog('[autofill] empty patch, retrying once');
774
- const retryBody = Object.assign({}, body, { _retry: true });
775
- return await self.autofill(retryBody, req, res);
776
- }
777
-
778
- // Optional save
779
- let savedItem = null;
780
- if (body.save_history || body.save_itemtype) {
781
- const targetItemtype = body.save_itemtype || 'ai_response';
782
- if (JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[targetItemtype]) {
783
- const saveObj = {
784
- itemtype: targetItemtype,
785
- name: `[${schemaName}] autofill → ${fields.join(', ')}`,
786
- object_id: object._id,
787
- target_schema: schemaName,
788
- fields,
789
- prompt: userPrompt,
790
- patch: filteredPatch,
791
- model,
792
- raw: { response }
793
- };
794
- await new Promise(function (resolve) {
795
- JOE.Storage.save(saveObj, targetItemtype, function (_err, saved) {
796
- savedItem = saved || null;
797
- resolve();
798
- });
799
- });
800
- }
801
- }
802
-
803
- return {
804
- success: true,
805
- patch: filteredPatch,
806
- model,
807
- usage: response && response.usage,
808
- saved: !!savedItem,
809
- saved_item: savedItem,
810
- elapsed_ms: Date.now() - startedAt
811
- };
812
- } catch (e) {
813
- return { success: false, error: e && e.message || 'Unknown error' };
814
- }
815
- };
816
-
817
- this.getResponse = function(data, req, res) {
818
- try {
819
- var prompt = data.prompt;
820
- if (!prompt) {
821
- return { error: 'No prompt provided' };
822
- }
823
-
824
- // Simulate a response from ChatGPT
825
- var response = `ChatGPT response to: ${prompt}`;
826
- res.jsonp({ response: response });
827
- return { use_callback: true };
828
- } catch (e) {
829
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
830
- }
831
- };
832
-
833
- this.html = function(data, req, res) {
834
- return JSON.stringify(self.default(data, req), '', '\t\r\n <br/>');
835
- };
836
- /* NEW AI RESPONSE API*/
837
-
838
- this.executeJOEAiPrompt = async function(data, req, res) {
839
- const referencedObjectIds = []; // Track all objects touched during helper function
840
- try {
841
- const promptId = data.ai_prompt;
842
- const params = data;
843
-
844
- if (!promptId) {
845
- return { error: "Missing prompt_id." };
846
- }
847
-
848
- const prompt = await $J.get(promptId); // Use $J.get for consistency
849
- if (!prompt) {
850
- return { error: "Prompt not found." };
851
- }
852
-
853
- let instructions = prompt.instructions || "";
854
- let finalInstructions=instructions;
855
- let finalInput='';
856
- // Pre-load all content_objects if content_items exist
857
- const contentObjects = {};
858
-
859
- if (prompt.content_items && Array.isArray(prompt.content_items)) {
860
- for (const content of prompt.content_items) {
861
- if (params[content.reference]) {
862
- const obj = $J.get(params[content.reference]);
863
- if (obj) {
864
- contentObjects[content.itemtype] = obj;
865
-
866
- // Pre-track referenced object
867
- if (obj._id && !referencedObjectIds.includes(obj._id)) {
868
- referencedObjectIds.push(obj._id);
869
- }
870
- }
871
- }
872
- }
873
- }
874
-
875
- // Execute any helper functions if present
876
- if (prompt.functions) {
877
- const modFunc = JOE.Utils.requireFromString(prompt.functions, prompt._id);
878
- const helperResult = await modFunc({
879
- instructions,
880
- params,
881
- ai_prompt: prompt,
882
- content_objects: contentObjects,
883
- trackObject: (obj) => {
884
- if (obj?._id && !referencedObjectIds.includes(obj._id)) {
885
- referencedObjectIds.push(obj._id);
886
- }
887
- }
888
- });
889
-
890
- if (typeof helperResult === 'object' && helperResult.error) {
891
- return { error: helperResult.error };
892
- }
893
-
894
- // Assume the result is { instructions, input }
895
- finalInstructions = helperResult.instructions || instructions;
896
- finalInput = helperResult.input;
897
- }
898
-
899
- const openai = newClient(); // however your OpenAI client is created
900
-
901
- const payload = {
902
- model: prompt.ai_model || "gpt-4o",
903
- instructions: finalInstructions||instructions, // string only
904
- input:finalInput||'',
905
- tools: prompt.tools || [{ "type": "web_search" }],
906
- tool_choice: prompt.tool_choice || "auto",
907
- temperature: prompt.temperature ? parseFloat(prompt.temperature) : 0.7,
908
- //return_token_usage: true
909
- //max_tokens: prompt.max_tokens ?? 1200
910
- };
911
-
912
- const response = await openai.responses.create(payload);
913
-
914
-
915
- // const payload = createResponsePayload(prompt, params, instructions, data.user_prompt);
916
-
917
- // const response = await openai.chat.completions.create(payload);
918
-
919
- const saved = await saveAiResponseRefactor({
920
- prompt,
921
- ai_response_content: response.output_text || "",
922
- user_prompt: payload.input,
923
- params,
924
- referenced_object_ids: referencedObjectIds,
925
- response_id:response.id,
926
- usage: response.usage || {},
927
- user: req && req.User,
928
- ai_assistant_id: data.ai_assistant_id
929
- });
930
-
931
- return { success: true, ai_response_id: saved._id,response:response.output_text || "",usage:response.usage };
932
- } catch (e) {
933
- console.error('❌ executeJOEAiPrompt error:', e);
934
- return { error: "Failed to execute AI prompt.",message: e.message };
935
- }
936
- };
937
-
938
- function createResponsePayload(prompt, params, instructions, user_prompt) {
939
- return {
940
- model: prompt.model || "gpt-4o",
941
- messages: [
942
- { role: "system", content: instructions },
943
- { role: "user", content: user_prompt || "" }
944
- ],
945
- tools: prompt.tools || undefined,
946
- tool_choice: prompt.tool_choice || "auto",
947
- temperature: prompt.temperature ?? 0.7,
948
- max_tokens: prompt.max_tokens ?? 1200
949
- };
950
- }
951
- async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids,response_id,usage,user,ai_assistant_id}) {
952
- var response_keys = [];
953
- try {
954
- response_keys = Object.keys(JSON.parse(ai_response_content));
955
- }catch (e) {
956
- console.error('❌ Error parsing AI response content for keys:', e);
957
- }
958
- // Best-effort parse into JSON for downstream agents (Thought pipeline, etc.)
959
- let parsedResponse = null;
960
- try {
961
- const jt = extractJsonText(ai_response_content);
962
- if (jt) {
963
- parsedResponse = JSON.parse(jt);
964
- }
965
- } catch(_e) {
966
- parsedResponse = null;
967
- }
968
- var creator_type = null;
969
- var creator_id = null;
970
- try{
971
- if (ai_assistant_id){
972
- creator_type = 'ai_assistant';
973
- creator_id = ai_assistant_id;
974
- } else if (user && user._id){
975
- creator_type = 'user';
976
- creator_id = user._id;
977
- }
978
- }catch(_e){}
979
- const aiResponse = {
980
- name: `${prompt.name}`,
981
- itemtype: 'ai_response',
982
- ai_prompt: prompt._id,
983
- prompt_name: prompt.name,
984
- prompt_method:prompt.prompt_method,
985
- response: ai_response_content,
986
- response_json: parsedResponse,
987
- response_keys: response_keys,
988
- response_id:response_id||'',
989
- user_prompt: user_prompt,
990
- params_used: params,
991
- usage: usage || {},
992
- tags: prompt.tags || [],
993
- model_used: prompt.ai_model || "gpt-4o",
994
- referenced_objects: referenced_object_ids, // new flexible array of referenced object ids
995
- created: (new Date).toISOString(),
996
- _id: cuid(),
997
- creator_type: creator_type,
998
- creator_id: creator_id
999
- };
1000
-
1001
- await new Promise((resolve, reject) => {
1002
- JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
1003
- if (err) {
1004
- console.error('❌ Error saving AI response:', err);
1005
- reject(err);
1006
- } else {
1007
- console.log('✅ AI response saved successfully');
1008
- resolve(result);
1009
- }
1010
- });
1011
- });
1012
-
1013
- return aiResponse;
1014
- }
1015
-
1016
- // ---------- Widget chat endpoints (Responses API + optional assistants) ----------
1017
- function normalizeMessages(messages) {
1018
- if (!Array.isArray(messages)) { return []; }
1019
- return messages.map(function (m) {
1020
- return {
1021
- role: m.role || 'assistant',
1022
- content: m.content || '',
1023
- created_at: m.created_at || m.created || new Date().toISOString()
1024
- };
1025
- });
1026
- }
1027
-
1028
- /**
1029
- * widgetStart
1030
- *
1031
- * Purpose:
1032
- * Create and persist a new `ai_widget_conversation` record for the
1033
- * external `<joe-ai-widget>` chat component. This is a lightweight
1034
- * conversation record that stores model, assistant, system text and
1035
- * messages for the widget.
1036
- *
1037
- * Inputs (data):
1038
- * - model (optional) override model for the widget
1039
- * - ai_assistant_id (optional) JOE ai_assistant cuid
1040
- * - system (optional) explicit system text
1041
- * - source (optional) freeform source tag, defaults to "widget"
1042
- *
1043
- * OpenAI calls:
1044
- * - None. This endpoint only touches storage.
1045
- *
1046
- * Output:
1047
- * - { success, conversation_id, model, assistant_id }
1048
- * where assistant_id is the OpenAI assistant_id (if present).
1049
- */
1050
- this.widgetStart = async function (data, req, res) {
1051
- try {
1052
- var body = data || {};
1053
- // Default to a modern chat model when no assistant/model is provided.
1054
- // If an assistant is supplied, its ai_model will override this.
1055
- var model = body.model || "gpt-5.1";
1056
- var assistant = body.ai_assistant_id ? $J.get(body.ai_assistant_id) : null;
1057
- var system = body.system || (assistant && assistant.instructions) || "";
1058
- // Prefer explicit user fields coming from the client (ai-widget-test page
1059
- // passes _joe.User fields). Widget endpoints no longer infer from req.User
1060
- // to keep a single, explicit source of truth.
1061
- var user = null;
1062
- if (body.user_id || body.user_name || body.user_color) {
1063
- user = {
1064
- _id: body.user_id,
1065
- name: body.user_name,
1066
- fullname: body.user_name,
1067
- color: body.user_color
1068
- };
1069
- }
1070
- var user_color = (body.user_color) || (user && user.color) || null;
1071
-
1072
- var convo = {
1073
- _id: (typeof cuid === 'function') ? cuid() : undefined,
1074
- itemtype: "ai_widget_conversation",
1075
- model: (assistant && assistant.ai_model) || model,
1076
- assistant: assistant && assistant._id,
1077
- assistant_id: assistant && assistant.assistant_id,
1078
- assistant_color: assistant && assistant.assistant_color,
1079
- user: user && user._id,
1080
- user_name: user && (user.fullname || user.name),
1081
- user_color: user_color,
1082
- system: system,
1083
- messages: [],
1084
- source: body.source || "widget",
1085
- created: new Date().toISOString(),
1086
- joeUpdated: new Date().toISOString()
1087
- };
1088
-
1089
- const saved = await new Promise(function (resolve, reject) {
1090
- // Widget conversations are lightweight and do not need full history diffs.
1091
- JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
1092
- if (err) return reject(err);
1093
- resolve(result);
1094
- }, { history: false });
1095
- });
1096
-
1097
- return {
1098
- success: true,
1099
- conversation_id: saved._id,
1100
- model: saved.model,
1101
- assistant_id: saved.assistant_id || null,
1102
- assistant_color: saved.assistant_color || null,
1103
- user_color: saved.user_color || user_color || null
1104
- };
1105
- } catch (e) {
1106
- console.error("[chatgpt] widgetStart error:", e);
1107
- return { success: false, error: e && e.message || "Unknown error" };
1108
- }
1109
- };
1110
-
1111
- /**
1112
- * widgetHistory
1113
- *
1114
- * Purpose:
1115
- * Load an existing `ai_widget_conversation` and normalize its
1116
- * messages for use by `<joe-ai-widget>` on page load or refresh.
1117
- *
1118
- * Inputs (data):
1119
- * - conversation_id or _id: the widget conversation cuid
1120
- *
1121
- * OpenAI calls:
1122
- * - None. Purely storage + normalization.
1123
- *
1124
- * Output:
1125
- * - { success, conversation_id, model, assistant_id, messages }
1126
- */
1127
- this.widgetHistory = async function (data, req, res) {
1128
- try {
1129
- var conversation_id = data.conversation_id || data._id;
1130
- if (!conversation_id) {
1131
- return { success: false, error: "Missing conversation_id" };
1132
- }
1133
- const convo = await new Promise(function (resolve, reject) {
1134
- JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
1135
- if (err) return reject(err);
1136
- resolve(results && results[0]);
1137
- });
1138
- });
1139
- if (!convo) {
1140
- return { success: false, error: "Conversation not found" };
1141
- }
1142
-
1143
- convo.messages = normalizeMessages(convo.messages);
1144
- return {
1145
- success: true,
1146
- conversation_id: convo._id,
1147
- model: convo.model,
1148
- assistant_id: convo.assistant_id || null,
1149
- assistant_color: convo.assistant_color || null,
1150
- user_color: convo.user_color || null,
1151
- messages: convo.messages
1152
- };
1153
- } catch (e) {
1154
- console.error("[chatgpt] widgetHistory error:", e);
1155
- return { success: false, error: e && e.message || "Unknown error" };
1156
- }
1157
- };
1158
-
1159
- /**
1160
- * widgetMessage
1161
- *
1162
- * Purpose:
1163
- * Handle a single user turn for `<joe-ai-widget>`:
1164
- * - Append the user message to the stored conversation.
1165
- * - Call OpenAI Responses (optionally with tools from the selected
1166
- * `ai_assistant`, via runWithTools + MCP).
1167
- * - Append the assistant reply, persist the conversation, and return
1168
- * the full message history plus the latest assistant message.
1169
- *
1170
- * Inputs (data):
1171
- * - conversation_id or _id: cuid of the widget conversation
1172
- * - content: user text
1173
- * - role: user role, defaults to "user"
1174
- * - assistant_id: optional OpenAI assistant_id (used only to
1175
- * locate the JOE ai_assistant config)
1176
- * - model: optional model override
1177
- *
1178
- * OpenAI calls:
1179
- * - responses.create (once if no tools; twice when tools are present):
1180
- * * First call may include tools (assistant.tools) and `tool_choice:"auto"`.
1181
- * * Any tool calls are executed via MCP and injected as `tool` messages.
1182
- * * Second call is plain Responses with updated messages.
1183
- *
1184
- * Output:
1185
- * - { success, conversation_id, model, assistant_id, messages,
1186
- * last_message, usage }
1187
- */
1188
- this.widgetMessage = async function (data, req, res) {
1189
- try {
1190
- var body = data || {};
1191
- var conversation_id = body.conversation_id || body._id;
1192
- var content = body.content;
1193
- var role = body.role || "user";
1194
-
1195
- if (!conversation_id || !content) {
1196
- return { success: false, error: "Missing conversation_id or content" };
1197
- }
1198
-
1199
- const convo = await new Promise(function (resolve, reject) {
1200
- JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
1201
- if (err) return reject(err);
1202
- resolve(results && results[0]);
1203
- });
1204
- });
1205
- if (!convo) {
1206
- return { success: false, error: "Conversation not found" };
1207
- }
1208
-
1209
- convo.messages = normalizeMessages(convo.messages);
1210
- const nowIso = new Date().toISOString();
1211
-
1212
- // Append user message
1213
- const userMsg = { role: role, content: content, created_at: nowIso };
1214
- convo.messages.push(userMsg);
1215
-
1216
- // Backfill user metadata (id/name/color) on older conversations that
1217
- // were created before we started storing these fields. Prefer explicit
1218
- // body fields only; we no longer infer from req.User so that widget
1219
- // calls always have a single, explicit user source.
1220
- var u = null;
1221
- if (body.user_id || body.user_name || body.user_color) {
1222
- u = {
1223
- _id: body.user_id,
1224
- name: body.user_name,
1225
- fullname: body.user_name,
1226
- color: body.user_color
1227
- };
1228
- }
1229
- if (u) {
1230
- if (!convo.user && u._id) {
1231
- convo.user = u._id;
1232
- }
1233
- if (!convo.user_name && (u.fullname || u.name)) {
1234
- convo.user_name = u.fullname || u.name;
1235
- }
1236
- if (!convo.user_color && u.color) {
1237
- convo.user_color = u.color;
1238
- }
1239
- }
1240
-
1241
- const assistantId = body.assistant_id || convo.assistant_id || null;
1242
- // NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
1243
- // We do NOT pass assistant_id to the Responses API (it is not supported in the
1244
- // version we are using); instead we look up the JOE ai_assistant by assistant_id
1245
- // and inject its configuration (model, instructions, tools) into the request.
1246
- var assistantObj = null;
1247
- if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
1248
- assistantObj = JOE.Data.ai_assistant.find(function (a) {
1249
- return a && a.assistant_id === assistantId;
1250
- }) || null;
1251
- }
1252
- const openai = newClient();
1253
- const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";
1254
-
1255
- // Prefer explicit system text on the conversation, then assistant instructions.
1256
- const systemText = (convo.system && String(convo.system)) ||
1257
- (assistantObj && assistantObj.instructions) ||
1258
- "";
1259
- const messagesForModel = convo.messages.map(function (m) {
1260
- return { role: m.role, content: m.content };
1261
- });
1262
-
1263
- // Use runWithTools so that, when an assistant has tools configured,
1264
- // we let the model call those tools via MCP before generating a
1265
- // final response.
1266
- const runResult = await runWithTools({
1267
- openai: openai,
1268
- model: model,
1269
- systemText: systemText,
1270
- messages: messagesForModel,
1271
- assistant: assistantObj,
1272
- req: req
1273
- });
1274
-
1275
- // If tools were called this turn, inject a small meta message so the
1276
- // widget clearly shows which functions ran before the assistant reply.
1277
- if (runResult.toolCalls && runResult.toolCalls.length) {
1278
- const names = runResult.toolCalls.map(function (tc) { return tc && tc.name; })
1279
- .filter(Boolean)
1280
- .join(', ');
1281
- convo.messages.push({
1282
- role: "assistant",
1283
- meta: "tools_used",
1284
- content: "[Tools used this turn: " + names + "]",
1285
- created_at: nowIso
1286
- });
1287
- }
1288
-
1289
- const assistantText = runResult.finalText || "";
1290
- const assistantMsg = {
1291
- role: "assistant",
1292
- content: assistantText,
1293
- created_at: new Date().toISOString()
1294
- };
1295
- convo.messages.push(assistantMsg);
1296
- convo.last_message_at = assistantMsg.created_at;
1297
- convo.joeUpdated = assistantMsg.created_at;
1298
-
1299
- await new Promise(function (resolve, reject) {
1300
- // Skip history for widget conversations to avoid heavy diffs / craydent.equals issues.
1301
- JOE.Storage.save(convo, "ai_widget_conversation", function (err, saved) {
1302
- if (err) return reject(err);
1303
- resolve(saved);
1304
- }, { history: false });
1305
- });
1306
-
1307
- return {
1308
- success: true,
1309
- conversation_id: convo._id,
1310
- model: model,
1311
- assistant_id: assistantId,
1312
- assistant_color: (assistantObj && assistantObj.assistant_color) || convo.assistant_color || null,
1313
- user_color: convo.user_color || ((u && u.color) || null),
1314
- messages: convo.messages,
1315
- last_message: assistantMsg,
1316
- // Usage comes from the underlying Responses call inside runWithTools.
1317
- usage: (runResult.response && runResult.response.usage) || {}
1318
- };
1319
- } catch (e) {
1320
- console.error("[chatgpt] widgetMessage error:", e);
1321
- return { success: false, error: e && e.message || "Unknown error" };
1322
- }
1323
- };
1324
-
1325
- // Mark async plugin methods so Server.pluginHandling will await them.
1326
- this.async = {
1327
- executeJOEAiPrompt: this.executeJOEAiPrompt,
1328
- testPrompt: this.testPrompt,
1329
- sendInitialConsultTranscript: this.sendInitialConsultTranscript,
1330
- widgetStart: this.widgetStart,
1331
- widgetHistory: this.widgetHistory,
1332
- widgetMessage: this.widgetMessage,
1333
- autofill: this.autofill,
1334
- };
1335
- this.protected = [,'testPrompt'];
1336
- return self;
1337
- }
1338
-
1339
- module.exports = new ChatGPT();
1
+ const OpenAI = require("openai");
2
+ const { google } = require('googleapis');
3
+ const path = require('path');
4
+ const os = require('os');
5
+ const fs = require('fs');
6
+ const MCP = require("../modules/MCP.js");
7
+ // const { name } = require("json-object-editor/server/webconfig");
8
+
9
+ function ChatGPT() {
10
+ // const fetch = (await import('node-fetch')).default;
11
+ //const openai = new OpenAI();
12
+ // Load the service account key JSON file
13
+ const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
14
+ const google_auth = new google.auth.GoogleAuth({
15
+ keyFile: serviceAccountKeyFile,
16
+ scopes: ['https://www.googleapis.com/auth/documents.readonly'],
17
+ });
18
+
19
+ var self = this;
20
+ this.async ={};
21
+ function coloredLog(message){
22
+ console.log(JOE.Utils.color('[chatgpt]', 'plugin', false), message);
23
+ }
24
+ //xx -setup and send a test prompt to chatgpt
25
+ //xx get the api key from joe settings
26
+
27
+ //get a prompt from id
28
+ //send the prompt to chatgpt
29
+
30
+ //++get the cotnent of a file
31
+ //++send the content of a file to chatgpt
32
+
33
+ //++ structure data
34
+ //++ save the response to an ai_repsonse
35
+ //create an ai_response
36
+ //store the content
37
+ //attach to the request
38
+ //store ids sent with the request
39
+ this.default = function(data, req, res) {
40
+ try {
41
+ var payload = {
42
+ params: req.params,
43
+ data: data
44
+ };
45
+ } catch (e) {
46
+ return { errors: 'plugin error: ' + e, failedat: 'plugin' };
47
+ }
48
+ return payload;
49
+ };
50
+ function getAPIKey() {
51
+ const setting = JOE.Utils.Settings('OPENAI_API_KEY');
52
+ if (!setting) throw new Error("Missing OPENAI_API_KEY setting");
53
+ return setting;
54
+ }
55
+ function getSchemaDef(name) {
56
+ if (!name) return { full: null, summary: null };
57
+ const full = JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[name];
58
+ const summary = JOE.Schemas && JOE.Schemas.summary && JOE.Schemas.summary[name];
59
+ return { full, summary };
60
+ }
61
+ /**
62
+ * callMCPTool
63
+ *
64
+ * Small, well‑scoped helper to invoke a JOE MCP tool directly in‑process,
65
+ * without going over HTTP or worrying about POST size limits.
66
+ *
67
+ * Usage:
68
+ * const result = await callMCPTool('listSchemas', {}, { req });
69
+ *
70
+ * Notes:
71
+ * - `toolName` must exist on MCP.tools.
72
+ * - `params` should be a plain JSON-serializable object.
73
+ * - `ctx` is optional and can pass `{ req }` or other context that MCP
74
+ * tools might want (for auth, user, etc.).
75
+ */
76
+ async function callMCPTool(toolName, params = {}, ctx = {}) {
77
+ if (!MCP || !MCP.tools) {
78
+ throw new Error("MCP module not initialized; cannot call MCP tool");
79
+ }
80
+ if (!toolName || typeof toolName !== 'string') {
81
+ throw new Error("Missing or invalid MCP tool name");
82
+ }
83
+ const fn = MCP.tools[toolName];
84
+ if (typeof fn !== 'function') {
85
+ throw new Error(`MCP tool "${toolName}" not found`);
86
+ }
87
+ try {
88
+ // All MCP tools accept (params, ctx) and return a JSON-serializable result.
89
+ // The Responses / tools API often returns arguments as a JSON string, so
90
+ // normalize that here before invoking the tool.
91
+ let toolParams = params;
92
+ if (typeof toolParams === 'string') {
93
+ try {
94
+ toolParams = JSON.parse(toolParams);
95
+ } catch (parseErr) {
96
+ console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, toolParams);
97
+ // Fall back to passing the raw string so tools that expect it still work.
98
+ }
99
+ }
100
+ const result = await fn(toolParams || {}, ctx || {});
101
+ return result;
102
+ } catch (e) {
103
+ // Surface a clean error upstream but keep details in logs.
104
+ console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
105
+ throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
106
+ }
107
+ }
108
+
109
+ /**
110
+ * extractToolCalls
111
+ *
112
+ * Best-effort parser for tool calls from a Responses API result.
113
+ * The Responses output shape may evolve; this function looks for
114
+ * any "tool_call" typed content in response.output[*].content[*]
115
+ * and normalizes it into `{ name, arguments }` objects.
116
+ */
117
+ function extractToolCalls(response) {
118
+ var calls = [];
119
+ if (!response || !Array.isArray(response.output)) { return calls; }
120
+
121
+ response.output.forEach(function (item) {
122
+ if (!item) { return; }
123
+ // v1-style: item.type === 'tool_call'
124
+ if (item.type === 'function_call') {
125
+ calls.push({
126
+ name: item.name || item.function_name,
127
+ arguments: item.arguments || item.function_arguments || {}
128
+ });
129
+ }
130
+ // message-style: item.content is an array of parts
131
+ if (Array.isArray(item.content)) {
132
+ item.content.forEach(function (part) {
133
+ if (!part) { return; }
134
+ if (part.type === 'function_call') {
135
+ calls.push({
136
+ name: part.name || part.tool_name,
137
+ arguments: part.arguments || part.args || {}
138
+ });
139
+ }
140
+ });
141
+ }
142
+ });
143
+
144
+ return calls;
145
+ }
146
+
147
+ // Detect "request too large / token limit" style errors from the Responses API.
148
+ function isTokenLimitError(err) {
149
+ if (!err || typeof err !== 'object') return false;
150
+ if (err.status !== 429 && err.status !== 400) return false;
151
+ const msg = (err.error && err.error.message) || err.message || '';
152
+ if (!msg) return false;
153
+ const lower = String(msg).toLowerCase();
154
+ // Cover common phrasing from OpenAI for context/TPM limits.
155
+ return (
156
+ lower.includes('request too large') ||
157
+ lower.includes('too many tokens') ||
158
+ lower.includes('max tokens') ||
159
+ lower.includes('maximum context length') ||
160
+ lower.includes('tokens per min')
161
+ );
162
+ }
163
+
164
+ // Create a compact representation of a JOE object for use in slim payloads.
165
+ function slimJOEObject(item) {
166
+ if (!item || typeof item !== 'object') return item;
167
+ const name = item.name || item.title || item.label || item.email || item.slug || item._id || '';
168
+ const info = item.info || item.description || item.summary || '';
169
+ return {
170
+ _id: item._id,
171
+ itemtype: item.itemtype,
172
+ name: name,
173
+ info: info
174
+ };
175
+ }
176
+
177
+ // Given an `understandObject` result, produce a slimmed version:
178
+ // - keep `object` as-is
179
+ // - keep `flattened` for the main object (depth-limited) if present
180
+ // - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
181
+ // - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
182
+ function slimUnderstandObjectResult(result) {
183
+ if (!result || typeof result !== 'object') return result;
184
+ const out = {
185
+ _id: result._id,
186
+ itemtype: result.itemtype,
187
+ object: result.object,
188
+ // retain main flattened view if available; this is typically much smaller
189
+ flattened: result.flattened || null,
190
+ schemas: result.schemas || {},
191
+ tags: result.tags || {},
192
+ statuses: result.statuses || {},
193
+ slim: true
194
+ };
195
+ if (Array.isArray(result.related)) {
196
+ out.related = result.related.map(function (rel) {
197
+ if (!rel) return rel;
198
+ const base = rel.object || {};
199
+ const slim = slimJOEObject(base);
200
+ return {
201
+ field: rel.field,
202
+ _id: slim && slim._id || rel._id,
203
+ itemtype: slim && slim.itemtype || rel.itemtype,
204
+ object: slim
205
+ };
206
+ });
207
+ } else {
208
+ out.related = [];
209
+ }
210
+ return out;
211
+ }
212
+
213
+ // Walk the messages array and, for any system message containing a JSON payload
214
+ // of the form { "tool": "understandObject", "result": {...} }, replace the
215
+ // result with a slimmed version to reduce token count. Returns a new array; if
216
+ // nothing was changed, returns the original array.
217
+ function shrinkUnderstandObjectMessagesForTokens(messages) {
218
+ if (!Array.isArray(messages)) return messages;
219
+ let changed = false;
220
+ const shrunk = messages.map(function (msg) {
221
+ if (!msg || msg.role !== 'system') return msg;
222
+ if (typeof msg.content !== 'string') return msg;
223
+ try {
224
+ const parsed = JSON.parse(msg.content);
225
+ if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
226
+ return msg;
227
+ }
228
+ const slimmed = slimUnderstandObjectResult(parsed.result);
229
+ changed = true;
230
+ return {
231
+ ...msg,
232
+ content: JSON.stringify({ tool: 'understandObject', result: slimmed })
233
+ };
234
+ } catch (_e) {
235
+ return msg;
236
+ }
237
+ });
238
+ return changed ? shrunk : messages;
239
+ }
240
+
241
+ /**
242
+ * runWithTools
243
+ *
244
+ * Single orchestration function for calling the OpenAI Responses API
245
+ * with optional tools (sourced from a JOE `ai_assistant`), handling
246
+ * tool calls via MCP, and issuing a follow-up model call with the
247
+ * tool results injected.
248
+ *
249
+ * Inputs (opts):
250
+ * - openai: OpenAI client instance
251
+ * - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
252
+ * - systemText: string of system / instructions text
253
+ * - messages: array of { role, content } for the conversation so far
254
+ * - assistant: JOE `ai_assistant` object (may contain `tools`)
255
+ * - req: Express request (passed into MCP tools as context)
256
+ *
257
+ * Returns:
258
+ * - { response, finalText, messages, toolCalls }
259
+ * where `finalText` is the assistant-facing text (from output_text)
260
+ * and `messages` is the possibly-extended message list including
261
+ * any synthetic `tool` messages.
262
+ */
263
+ async function runWithTools(opts) {
264
+ const openai = opts.openai;
265
+ const model = opts.model;
266
+ const systemText = opts.systemText || "";
267
+ const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
268
+ const assistant = opts.assistant || null;
269
+ const req = opts.req;
270
+ const attachmentsMode = opts.attachments_mode || null;
271
+ const openaiFileIds = opts.openai_file_ids || null;
272
+
273
+ // Normalize tools: in many schemas tools may be stored as a JSON string;
274
+ // here we accept either an array or a JSON-stringified array.
275
+ let tools = null;
276
+ if (assistant && assistant.tools) {
277
+ if (Array.isArray(assistant.tools)) {
278
+ tools = assistant.tools;
279
+ } else if (typeof assistant.tools === 'string') {
280
+ try {
281
+ const parsed = JSON.parse(assistant.tools);
282
+ if (Array.isArray(parsed)) {
283
+ tools = parsed;
284
+ }
285
+ } catch (e) {
286
+ console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
287
+ }
288
+ }
289
+ }
290
+ // Normalize tool definitions for the Responses API. The assistant UI
291
+ // uses the Assistants-style shape ({ type:'function', function:{...} }),
292
+ // but Responses expects the name/description/parameters at the top level:
293
+ // { type:'function', name:'x', description:'...', parameters:{...} }
294
+ if (Array.isArray(tools)) {
295
+ tools = tools.map(function (t) {
296
+ if (t && t.type === 'function' && t.function && !t.name) {
297
+ const fn = t.function || {};
298
+ return {
299
+ type: 'function',
300
+ name: fn.name,
301
+ description: fn.description,
302
+ parameters: fn.parameters || {}
303
+ };
304
+ }
305
+ return t;
306
+ });
307
+ }
308
+
309
+ // No tools configured – do a simple single Responses call.
310
+ if (!tools) {
311
+ const resp = await openai.responses.create({
312
+ model: model,
313
+ instructions: systemText,
314
+ input: messages
315
+ });
316
+ return {
317
+ response: resp,
318
+ finalText: resp.output_text || "",
319
+ messages: messages,
320
+ toolCalls: []
321
+ };
322
+ }
323
+
324
+ // Step 1: call the model with tools enabled.
325
+ let firstPayload = {
326
+ model: model,
327
+ instructions: systemText,
328
+ input: messages,
329
+ tools: tools,
330
+ tool_choice: "auto"
331
+ };
332
+ if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
333
+ try{
334
+ firstPayload = await attachFilesToResponsesPayload(openai, firstPayload, {
335
+ attachments_mode: attachmentsMode,
336
+ openai_file_ids: openaiFileIds
337
+ });
338
+ }catch(e){
339
+ console.warn('[chatgpt] runWithTools attachments failed; continuing without attachments', e && e.message || e);
340
+ }
341
+ }
342
+ const first = await openai.responses.create(firstPayload);
343
+
344
+ const toolCalls = extractToolCalls(first);
345
+
346
+ // If the model didn't decide to use tools, just return the first answer.
347
+ if (!toolCalls.length) {
348
+ return {
349
+ response: first,
350
+ finalText: first.output_text || "",
351
+ messages: messages,
352
+ toolCalls: []
353
+ };
354
+ }
355
+
356
+ // Step 2: execute each tool call via MCP and append tool results.
357
+ for (let i = 0; i < toolCalls.length; i++) {
358
+ const tc = toolCalls[i];
359
+ try {
360
+ const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
361
+ messages.push({
362
+ // Responses API does not support a "tool" role in messages.
363
+ // We inject tool outputs as a synthetic system message so
364
+ // the model can see the results without affecting the
365
+ // user/assistant turn structure.
366
+ role: "system",
367
+ content: JSON.stringify({ tool: tc.name, result: result })
368
+ });
369
+ } catch (e) {
370
+ console.error("[chatgpt] MCP tool error in runWithTools:", e);
371
+ messages.push({
372
+ role: "system",
373
+ content: JSON.stringify({
374
+ tool: tc.name,
375
+ error: e && e.message || "Tool execution failed"
376
+ })
377
+ });
378
+ }
379
+ }
380
+
381
+ // Step 3: ask the model again with tool outputs included.
382
+ let finalMessages = messages;
383
+ let second;
384
+ try {
385
+ let secondPayload = {
386
+ model: model,
387
+ instructions: systemText,
388
+ input: finalMessages
389
+ };
390
+ if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
391
+ try{
392
+ secondPayload = await attachFilesToResponsesPayload(openai, secondPayload, {
393
+ attachments_mode: attachmentsMode,
394
+ openai_file_ids: openaiFileIds
395
+ });
396
+ }catch(e){
397
+ console.warn('[chatgpt] runWithTools second-call attachments failed; continuing without attachments', e && e.message || e);
398
+ }
399
+ }
400
+ second = await openai.responses.create(secondPayload);
401
+ } catch (e) {
402
+ if (isTokenLimitError(e)) {
403
+ console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
404
+ const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
405
+ // If nothing was shrunk, just rethrow the original error.
406
+ if (shrunk === finalMessages) {
407
+ throw e;
408
+ }
409
+ finalMessages = shrunk;
410
+ // Retry once with the smaller payload; let any error bubble up.
411
+ let retryPayload = {
412
+ model: model,
413
+ instructions: systemText,
414
+ input: finalMessages
415
+ };
416
+ if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
417
+ try{
418
+ retryPayload = await attachFilesToResponsesPayload(openai, retryPayload, {
419
+ attachments_mode: attachmentsMode,
420
+ openai_file_ids: openaiFileIds
421
+ });
422
+ }catch(e2){
423
+ console.warn('[chatgpt] runWithTools retry attachments failed; continuing without attachments', e2 && e2.message || e2);
424
+ }
425
+ }
426
+ second = await openai.responses.create(retryPayload);
427
+ } else {
428
+ throw e;
429
+ }
430
+ }
431
+
432
+ return {
433
+ response: second,
434
+ finalText: second.output_text || "",
435
+ messages: finalMessages,
436
+ toolCalls: toolCalls
437
+ };
438
+ }
439
+
440
// Build a fresh OpenAI client using the API key stored in JOE settings.
// Note: the constructor does not validate the key; authentication errors
// only surface on the first API call.
// (A previous commented-out implementation that returned { errors } on a
// missing key was removed as dead code; callers that still check
// `client.errors` are tolerant of it being undefined.)
function newClient() {
  return new OpenAI({ apiKey: getAPIKey() });
}
453
+
454
// Safely call the Responses API with optional temperature/top_p.
// Some models reject these sampling parameters; when the API error message
// says so ("unsupported parameter" / "unknown parameter" mentioning
// temperature or top_p), strip them from a copy of the payload and retry
// once. FIX: the retry call now lives outside the detection try/catch — the
// old code swallowed a genuine retry failure and rethrew the stale original
// error, hiding the real cause. Non-parameter errors propagate unchanged.
async function safeResponsesCreate(openai, payload){
  try {
    return await openai.responses.create(payload);
  } catch (e) {
    let retryable = false;
    try {
      const msg = ((e && e.error && e.error.message) || (e && e.message) || '').toLowerCase();
      const mentionsParam = msg.includes('unsupported parameter') || msg.includes('unknown parameter');
      retryable = mentionsParam && (msg.includes('temperature') || msg.includes('top_p'));
    } catch (_e) { /* malformed error object: treat as non-retryable */ }
    if (!retryable) {
      throw e;
    }
    // Shallow-copy so the caller's payload keeps its original fields.
    const stripped = Object.assign({}, payload);
    delete stripped.temperature;
    delete stripped.top_p;
    console.warn('[chatgpt] Retrying without temperature/top_p due to model rejection');
    return await openai.responses.create(stripped);
  }
}
477
+
478
// Ensure a vector store exists with the given file ids indexed.
// Creates one ephemeral store per run (reuse/persistence is a possible
// later optimization), links at most 10 files, then waits best-effort
// (up to 8s) for indexing to complete. Returns { vectorStoreId }.
async function ensureVectorStoreForFiles(fileIds = []){
  const openai = newClient();
  const store = await openai.vectorStores.create({ name: 'JOE Prompt Run ' + Date.now() });

  // Link each file id to the new store; individual failures are logged
  // and skipped so one bad id doesn't abort the whole batch.
  for (const fileId of (fileIds || []).slice(0, 10)) {
    try {
      await openai.vectorStores.files.create(store.id, { file_id: fileId });
    } catch (err) {
      console.warn('[chatgpt] vectorStores.files.create failed for', fileId, err && err.message || err);
    }
  }

  // Poll until every linked file reports 'completed' or the deadline hits.
  // Polling errors are non-fatal: the store id is returned regardless.
  const deadline = Date.now() + 8000;
  try {
    while (Date.now() < deadline) {
      const page = await openai.vectorStores.files.list(store.id, { limit: 100 });
      const entries = (page && page.data) || [];
      const stillProcessing = entries.some(function (f) {
        return f.status && f.status !== 'completed';
      });
      if (!stillProcessing) {
        break;
      }
      await new Promise(function (resolve) { setTimeout(resolve, 500); });
    }
  } catch (_e) { /* non-fatal */ }

  return { vectorStoreId: store.id };
}
506
+
507
// ---------------- OpenAI Files helpers ----------------
/**
 * attachFilesToResponsesPayload
 *
 * Wire OpenAI file attachments into a `responses.create` payload in one
 * consistent way for both the MCP and non-MCP code paths.
 *
 * Modes (opts.attachments_mode):
 *  - 'file_search': build a temporary vector store for the ids via
 *    ensureVectorStoreForFiles, expose the `file_search` tool (if absent)
 *    and point tool_resources.file_search.vector_store_ids at the store.
 *    payload.input is left untouched.
 *  - 'direct' (default): rewrite payload.input into a single user message
 *    whose content is an `input_text` part (the prior string input, or the
 *    JSON-stringified message array) followed by up to 10
 *    `{ type:'input_file', file_id }` parts.
 *
 * File-only helper: instructions and other payload fields are never
 * modified. Mutates and returns `payload`.
 */
async function attachFilesToResponsesPayload(openai, payload, opts){
  const attachMode = (opts && opts.attachments_mode) || 'direct';
  const ids = (opts && opts.openai_file_ids) || [];
  if (!Array.isArray(ids) || !ids.length) {
    return payload;
  }

  if (attachMode === 'file_search') {
    const ensured = await ensureVectorStoreForFiles(ids);
    const tools = payload.tools || [];
    const hasFileSearch = tools.some(function (t) { return t && t.type === 'file_search'; });
    if (!hasFileSearch) {
      tools.push({ type: 'file_search' });
    }
    payload.tools = tools;
    payload.tool_resources = Object.assign({}, payload.tool_resources, {
      file_search: { vector_store_ids: [ensured.vectorStoreId] }
    });
    return payload;
  }

  // Default: direct context stuffing using input_text + input_file parts.
  const contentParts = [];
  if (typeof payload.input === 'string' && payload.input.trim().length) {
    contentParts.push({ type: 'input_text', text: String(payload.input) });
  } else if (Array.isArray(payload.input)) {
    // Caller already provided messages as input: preserve them by
    // flattening into a single input_text part where possible.
    try {
      const flattened = JSON.stringify(payload.input);
      if (flattened && flattened.length) {
        contentParts.push({ type: 'input_text', text: flattened });
      }
    } catch (_e) {}
  }
  for (const fid of ids.slice(0, 10)) {
    if (fid) {
      contentParts.push({ type: 'input_file', file_id: fid });
    }
  }
  payload.input = [{ role: 'user', content: contentParts }];
  return payload;
}
568
/**
 * Upload a raw buffer to OpenAI Files by staging it as a temp file and
 * streaming it to openai.files.create.
 *
 * @param {Buffer} buffer - file bytes to upload.
 * @param {string} [filename] - desired name; sanitized with path.basename
 *   so a caller-supplied value (e.g. derived from a URL) cannot contain
 *   directory components and escape the temp directory. Defaults to a
 *   timestamped name.
 * @param {string} [contentType] - currently unused; kept for interface
 *   compatibility with callers that pass it.
 * @param {string} [purpose] - OpenAI file purpose, default 'assistants'.
 * @returns {Promise<{id: string, purpose: string}>}
 */
async function uploadFileFromBuffer(buffer, filename, contentType, purpose) {
  const openai = newClient();
  const usePurpose = purpose || 'assistants';
  const tmpDir = os.tmpdir();
  // FIX: basename() strips any path components from the caller-supplied
  // filename so the temp file is always created inside tmpDir.
  const safeName = path.basename(filename || ('upload_' + Date.now())) || ('upload_' + Date.now());
  const tmpPath = path.join(tmpDir, safeName);
  await fs.promises.writeFile(tmpPath, buffer);
  try {
    // openai.files.create accepts a readable stream
    const fileStream = fs.createReadStream(tmpPath);
    const created = await openai.files.create({
      purpose: usePurpose,
      file: fileStream
    });
    return { id: created.id, purpose: usePurpose };
  } finally {
    // best-effort cleanup of the staged temp file
    fs.promises.unlink(tmpPath).catch(() => {});
  }
}
588
+
589
// In-process helper for other plugins: upload a buffer to OpenAI Files.
// Throws when the buffer is missing or empty; otherwise delegates to
// uploadFileFromBuffer (purpose defaults to 'assistants').
this.filesUploadFromBufferHelper = async function ({ buffer, filename, contentType, purpose }) {
  if (!buffer || !buffer.length) {
    throw new Error('Missing buffer');
  }
  const usePurpose = purpose || 'assistants';
  return await uploadFileFromBuffer(buffer, filename, contentType, usePurpose);
};
596
+
597
// Public endpoint: retry an OpenAI upload by downloading the bytes from a
// URL (e.g. an S3 object URL) and re-uploading via uploadFileFromBuffer.
// Returns { success, openai_file_id, openai_purpose } or { success:false, error }.
this.filesRetryFromUrl = async function (data, req, res) {
  try {
    const { default: got } = await import('got');
    const url = data && (data.url || data.location);
    if (!url) {
      return { success: false, error: 'Missing url' };
    }
    // Derive a filename: explicit > last URL path segment > timestamped.
    const filename = (data && data.filename) || (url && url.split('/').pop()) || ('upload_' + Date.now());
    const contentType = (data && data.contentType) || undefined;
    const download = await got(url, { responseType: 'buffer' });
    const uploaded = await uploadFileFromBuffer(download.body, filename, contentType, 'assistants');
    return { success: true, openai_file_id: uploaded.id, openai_purpose: uploaded.purpose };
  } catch (e) {
    return { success: false, error: e && e.message || 'Retry upload failed' };
  }
};
616
// Smoke-test endpoint: sends a fixed story prompt to chat.completions and,
// best-effort, persists the reply as an ai_response record.
// Returns { payload, chatCompletion, content } or an { errors } object.
this.testPrompt= async function(data, req, res) {
  try {
    var payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  const client = newClient();
  // Defensive check — appears vestigial: newClient() as defined in this
  // file always returns an OpenAI instance, never an { errors } object.
  if(client.errors){
    return { errors: client.errors };
  }
  try {
    const chatCompletion = await client.chat.completions.create({
      messages: [{ role: 'user', content: 'Tell me a story about JOE: the json object editor in under 256 chars.' }],
      model: 'gpt-4o',
    });
    coloredLog(chatCompletion);
    // Defensively unwrap choices[0].message.content; falls back to ''.
    const text = chatCompletion.choices && chatCompletion.choices[0] && chatCompletion.choices[0].message && chatCompletion.choices[0].message.content || '';
    // Optionally persist as ai_response with parsed JSON when applicable
    const parsed = (function(){
      try {
        const jt = extractJsonText(text);
        return jt ? JSON.parse(jt) : null;
      } catch(_e){ return null; }
    })();
    try {
      // Attribute the saved record to the requesting user when available.
      var creator_type = null;
      var creator_id = null;
      try{
        var u = req && req.User;
        if (u && u._id){
          creator_type = 'user';
          creator_id = u._id;
        }
      }catch(_e){}
      const aiResponse = {
        itemtype: 'ai_response',
        name: 'Test Prompt ChatGPT',
        response_type: 'testPrompt',
        response: text,
        response_json: parsed,
        response_id: chatCompletion.id || '',
        user_prompt: payload && payload.data && payload.data.prompt || 'Tell me a story about JOE: the json object editor in under 256 chars.',
        model_used: 'gpt-4o',
        created: (new Date()).toISOString(),
        creator_type: creator_type,
        creator_id: creator_id
      };
      // Fire-and-forget save (empty callback); history disabled on purpose.
      JOE.Storage.save(aiResponse, 'ai_response', function(){}, { history: false, user: (req && req.User) || { name:'system' } });
    } catch(_e){ /* best-effort only */ }
    return {payload,chatCompletion,content:text};
  } catch (error) {
    // 429 = quota exceeded; surface a friendlier message for that case.
    if (error.status === 429) {
      return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
    } else {
      return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
    }
  }
}
677
+
678
// Run the "initial consult" prompt against a business's intake-call
// transcript (a Google Doc) and persist the reply as an ai_response.
// Flow: look up the business + ai_prompt records, fetch the transcript
// from Google Docs, send `${template}\n\n${transcript}` to
// chat.completions (gpt-4o), save the reply via saveAIResponse, and
// return the payload, source objects, and chat content.
// Error responses are written through res.jsonp (existing contract).
this.sendInitialConsultTranscript= async function(data, req, res) {
  coloredLog("sendInitialConsultTranscript");
  try {
    var payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  var businessOBJ = JOE.Data.business.find(b=>b._id == data.business);
  var promptOBJ = JOE.Data.ai_prompt.find(p=>p._id == data.ai_prompt);
  // FIX: guard missing records — unknown ids previously fell through and
  // crashed on the property accesses below.
  if (!businessOBJ) {
    return res.jsonp({ error: 'Business not found' });
  }
  if (!promptOBJ) {
    return res.jsonp({ error: 'AI prompt not found' });
  }

  // See if there is an initial_transcript_url property on that object
  const transcriptUrl = businessOBJ.initial_transcript_url;
  if (!transcriptUrl) {
    return res.jsonp({ error: 'No initial transcript URL found' });
  }

  // Get the content of the file from Google Docs
  const transcriptContent = await getGoogleDocContent(transcriptUrl);
  if (!transcriptContent || transcriptContent.error) {
    // FIX: only read .error when transcriptContent exists — the old code
    // dereferenced a null/undefined transcriptContent here and threw.
    const detail = transcriptContent && transcriptContent.error && transcriptContent.error.message;
    return res.jsonp({ error: detail || 'Failed to fetch transcript content' });
  }
  const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
  payload.tokenCount = tokenCount;
  coloredLog("token count: "+tokenCount);
  // Send the content to ChatGPT, with the template property of the prompt object
  const client = new OpenAI({
    apiKey: getAPIKey(), // This is the default and can be omitted
  });

  const chatResponse = await client.chat.completions.create({
    messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
    model: 'gpt-4o',
  });

  // Get the response
  const chatContent = chatResponse.choices[0].message.content;
  const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
  // Save the response (best-effort; saveAIResponse swallows its own errors)
  await saveAIResponse({
    name:responseName,
    business: data.business,
    ai_prompt: data.ai_prompt,
    response: chatContent,
    payload,
    prompt_method:req.params.method
  }, req && req.User);
  coloredLog("response saved -"+responseName);
  return {payload,
    businessOBJ,
    promptOBJ,
    chatContent,
    responseName
  };

}
744
+
745
/**
 * Fetch the plain-text content of a Google Doc.
 * Authorizes with the module-level service-account `google_auth`.
 * FIX: removed two unused locals — a second GoogleAuth instance (`auth`)
 * that was constructed but never passed anywhere, and a GOOGLE_DOCS_API_KEY
 * settings read whose value was never used.
 *
 * @param {string} docUrl - a Google Docs URL containing a "/d/<id>" segment.
 * @returns {Promise<string|{error: Error}>} concatenated paragraph text,
 *   or { error } when the fetch/parse fails.
 */
async function getGoogleDocContent(docUrl) {
  try {
    const docs = google.docs({ version: 'v1', auth: google_auth });
    const docId = extractDocIdFromUrl(docUrl);
    const doc = await docs.documents.get({ documentId: docId });

    // Flatten paragraph text runs; redact a specific participant name and
    // collapse VTT-style "HH:MM:SS.mmm --> HH:MM:SS.mmm" timestamps.
    let content = doc.data.body.content.map(element => {
      if (element.paragraph && element.paragraph.elements) {
        return element.paragraph.elements.map(
          e => e.textRun ? e.textRun.content.replace(/Euron Nicholson/g, '[EN]').replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
            : ''
        ).join('');
      }
      return '';
    }).join('\n');

    return content;
  } catch (error) {
    console.error('Error fetching Google Doc content:', error);
    return {error};
  }
}
775
/**
 * Count tokens for `text` under the given model's encoding.
 * FIX: free the encoder after use — tiktoken-style encoders
 * (encoding_for_model) hold WASM memory that is not garbage-collected, so
 * the old code leaked an encoding per call. The free() call is guarded in
 * case the underlying implementation doesn't expose it.
 *
 * @param {string} text - text to tokenize.
 * @param {string} [model='gpt-4o'] - model whose encoding to use.
 * @returns {number} token count.
 */
function countTokens(text, model = 'gpt-4o') {
  const enc = encoding_for_model(model);
  try {
    return enc.encode(text).length;
  } finally {
    if (typeof enc.free === 'function') {
      enc.free();
    }
  }
}
780
// Pull the document id out of a Google Docs URL ("/d/<id>" path segment).
// Returns null when the URL has no such segment.
function extractDocIdFromUrl(url) {
  const docIdPattern = /\/d\/([a-zA-Z0-9-_]+)/;
  const found = url.match(docIdPattern);
  if (!found) {
    return null;
  }
  return found[1];
}
784
+
785
// Persist an ai_response record via JOE.Storage.
// Best-effort by design: any failure (including a storage error) is logged
// and swallowed so prompt flows are never aborted by a save problem.
// `user` (optional) is used only to stamp creator_type/creator_id.
async function saveAIResponse(data, user) {
  try {
    var creator_type = null;
    var creator_id = null;
    // Attribute the record to the acting user when one is available.
    try{
      if (user && user._id){
        creator_type = 'user';
        creator_id = user._id;
      }
    }catch(_e){}
    const aiResponse = {
      name: data.name,
      itemtype: 'ai_response',
      business: data.business,
      ai_prompt: data.ai_prompt,
      response: data.response,
      payload: data.payload,
      prompt_method:data.prompt_method,
      created: (new Date).toISOString(),
      _id:cuid(),
      creator_type: creator_type,
      creator_id: creator_id
      // Add any other fields you want to save
    };
    // JOE.Storage.save is callback-based; wrap it so we can await completion.
    await new Promise((resolve, reject) => {
      JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
        if (err) {
          coloredLog('Error saving AI response: ' + err);
          reject(err);
        } else {
          coloredLog('AI response saved successfully');
          resolve(result);
        }
      });
    });
  } catch (error) {
    // Swallow: saving the response is auxiliary to the prompt flow.
    coloredLog('Error in saveAIResponse: ' + error);
  }
}
824
+
825
// Normalize model output that should contain JSON. Models frequently wrap
// JSON in markdown fences (```json ... ```) and may surround it with prose.
// This strips the fence and isolates the first {...} / [...] span so that
// JSON.parse has the best chance of succeeding. Returns '' for falsy input.
function extractJsonText(raw) {
  if (!raw) {
    return '';
  }
  let text = String(raw).trim();

  // Prefer a ```json fence; otherwise fall back to the first plain ``` fence.
  const jsonFenceAt = text.indexOf('```json');
  const fenceAt = jsonFenceAt !== -1 ? jsonFenceAt : text.indexOf('```');
  if (fenceAt !== -1) {
    // Drop everything through the fence's opening line (or just the
    // backticks when the fence has no trailing newline).
    const newlineAt = text.indexOf('\n', fenceAt);
    text = newlineAt === -1 ? text.substring(fenceAt + 3) : text.substring(newlineAt + 1);
    // Trim off the closing fence and anything after it.
    const closingAt = text.lastIndexOf('```');
    if (closingAt !== -1) {
      text = text.substring(0, closingAt);
    }
    text = text.trim();
  }

  // If prose still surrounds the JSON, slice from the first {/[ to the
  // last }/].
  if (text[0] !== '{' && text[0] !== '[') {
    const braceAt = text.indexOf('{');
    const bracketAt = text.indexOf('[');
    let openAt;
    if (braceAt === -1) {
      openAt = bracketAt;
    } else if (bracketAt === -1) {
      openAt = braceAt;
    } else {
      openAt = Math.min(braceAt, bracketAt);
    }
    const closeAt = Math.max(text.lastIndexOf('}'), text.lastIndexOf(']'));
    if (openAt !== -1 && closeAt > openAt) {
      text = text.slice(openAt, closeAt + 1);
    }
  }
  return text.trim();
}
863
+
864
// Autofill feature (Responses API; supports assistant_id or model).
// Given an object id/record, a schema name, and a list of fields, asks the
// model to populate only those fields and returns a filtered { patch }.
// Optionally runs with MCP tools, retries once on an empty patch, and can
// persist the run as an ai_response (or another itemtype) record.
this.autofill = async function (data, req, res) {
  const startedAt = Date.now();
  try {
    const body = data || {};
    const objectId = body.object_id || body._id;
    const object = body.object || $J.get(objectId);
    const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
    const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
    const rawFields = body.fields || body.field;
    const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
    const userPrompt = body.prompt || '';
    const assistantId = body.assistant_id || null;

    if (!object) {
      return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
    }
    if (!schemaName) {
      return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
    }
    if (!fields.length) {
      return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
    }

    const flattened = JOE.Utils.flattenObject(object._id);
    // FIX: join with a real newline. This previously used join('\\n') — the
    // two literal characters backslash+n — so the system prompt reached the
    // model as one long line with visible "\n" text in it (the MCP path
    // below already joins with '\n').
    const systemText = [
      'You are JOE (Json Object Editor) assistant.',
      'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
      '- Respect field types (text, number, arrays, enums, references).',
      '- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
      '- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
      '- If a field is an array, return an array of values.',
      '- Never modify unrelated fields.',
      '- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
      '- If you lack sufficient information, return an empty patch.'
    ].join('\n');

    const schemaForContext = schemaSummary || schemaFull || {};
    const userInput = JSON.stringify({
      action: 'autofill_fields',
      target_schema: schemaName,
      requested_fields: fields,
      user_prompt: userPrompt,
      object_context: flattened,
      schema_context: schemaForContext
    }, null, ' ');

    const openai = newClient();
    const model = body.model || 'gpt-4o-mini';////'gpt-5-nano';

    // Normalize MCP options for autofill. By default, when mcp_enabled is
    // true we expose the read-only toolset, which is safe for field
    // suggestions. Callers can override toolset / selected tools.
    const mcpEnabled = !!body.mcp_enabled;
    const mcpToolset = body.mcp_toolset || 'read-only';
    const mcpSelected = Array.isArray(body.mcp_selected_tools) ? body.mcp_selected_tools : null;
    const mcpInstructionsMode = body.mcp_instructions_mode || 'auto';

    let response;
    let mcpToolCalls = [];
    if (mcpEnabled) {
      const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
      const toolsForModel = MCP.getToolDefinitions(toolNames);
      const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
      const systemTextWithMcp = [systemText, mcpText || ''].join('\n').trim();

      const messages = [{ role:'user', content:userInput }];

      const runResult = await runWithTools({
        openai: openai,
        model: model,
        systemText: systemTextWithMcp,
        messages: messages,
        assistant: { tools: toolsForModel },
        req: req
      });
      response = runResult.response;
      // Record which tools the model actually invoked (for save/telemetry).
      if (runResult && Array.isArray(runResult.toolCalls)) {
        mcpToolCalls = runResult.toolCalls.map(function(tc){
          return {
            name: tc && (tc.name || tc.function_name || tc.tool_name),
            arguments: tc && tc.arguments
          };
        }).filter(function(x){ return x && x.name; });
      }
    } else {
      // For simplicity and robustness, use plain text output and instruct the
      // model to return a strict JSON object. We previously attempted the
      // Responses `json_schema` response_format, but the SDK shape can change
      // and is harder to parse reliably; text + JSON.parse is sufficient here.
      const requestBase = {
        temperature: 0.2,
        instructions: systemText,
        input: userInput
      };
      // Optional web_search tool: if the caller sets allow_web truthy, expose
      // the built-in web_search capability and let the model decide when to
      // call it.
      if (body.allow_web) {
        coloredLog("allowing web search");
        requestBase.tools = [{ type: 'web_search' }];
        requestBase.tool_choice = 'auto';
      }

      if (assistantId) {
        response = await openai.responses.create({ assistant_id: assistantId, ...requestBase });
      } else {
        response = await openai.responses.create({ model, ...requestBase });
      }
    }

    // Extract the model's text: prefer output_text, then scan the raw
    // output array for the first message text part.
    let textOut = '';
    try { textOut = response.output_text || ''; } catch (_e) {}
    coloredLog("textOut: "+textOut);
    if (!textOut && response && Array.isArray(response.output)) {
      for (let i = 0; i < response.output.length; i++) {
        const item = response.output[i];
        if (item && item.type === 'message' && item.content && Array.isArray(item.content)) {
          const textPart = item.content.find(function (c) { return c.type === 'output_text' || c.type === 'text'; });
          if (textPart && (textPart.text || textPart.output_text)) {
            textOut = textPart.text || textPart.output_text;
            break;
          }
        }
      }
    }

    // Parse { patch } out of the (possibly fenced) model output.
    let patch = {};
    try {
      const jsonText = extractJsonText(textOut);
      const parsed = JSON.parse(jsonText || '{}');
      patch = parsed.patch || {};
    } catch (_e) {
      console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
    }
    coloredLog("patch: "+JSON.stringify(patch));
    // Only keep the fields the caller asked for.
    const filteredPatch = {};
    fields.forEach(function (f) {
      if (Object.prototype.hasOwnProperty.call(patch, f)) {
        filteredPatch[f] = patch[f];
      }
    });
    // If we got no fields back on the first attempt, retry once before
    // giving up. Avoid infinite loops by marking a retry flag.
    if (!Object.keys(filteredPatch).length && !body._retry) {
      coloredLog('[autofill] empty patch, retrying once');
      const retryBody = Object.assign({}, body, { _retry: true });
      return await self.autofill(retryBody, req, res);
    }

    // Optional save of the run (ai_response by default).
    let savedItem = null;
    if (body.save_history || body.save_itemtype) {
      const targetItemtype = body.save_itemtype || 'ai_response';
      if (JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[targetItemtype]) {
        const isAiResponse = (targetItemtype === 'ai_response');
        const toolNamesForSave = mcpEnabled ? MCP.getToolNamesForToolset(mcpToolset, mcpSelected) : [];
        const baseSave = {
          itemtype: targetItemtype,
          name: `[${schemaName}] autofill ${fields.join(', ')}`,
          object_id: object._id,
          target_schema: schemaName,
          fields,
          prompt: userPrompt,
          patch: filteredPatch,
          model,
          raw: { response, mcp_tools_used: mcpToolCalls }
        };
        // MCP metadata fields only exist on the ai_response schema.
        if (isAiResponse) {
          baseSave.mcp_enabled = mcpEnabled;
          baseSave.mcp_toolset = mcpToolset;
          baseSave.mcp_selected_tools = toolNamesForSave;
          baseSave.mcp_instructions_mode = mcpInstructionsMode;
          baseSave.mcp_tools_used = mcpToolCalls;
        }
        await new Promise(function (resolve) {
          JOE.Storage.save(baseSave, targetItemtype, function (_err, saved) {
            savedItem = saved || null;
            resolve();
          });
        });
      }
    }

    return {
      success: true,
      patch: filteredPatch,
      model,
      usage: response && response.usage,
      saved: !!savedItem,
      saved_item: savedItem,
      elapsed_ms: Date.now() - startedAt
    };
  } catch (e) {
    return { success: false, error: e && e.message || 'Unknown error' };
  }
};
1061
+
1062
// Placeholder endpoint: echoes the prompt back as a simulated ChatGPT
// reply via res.jsonp. Returns { use_callback: true } to signal the plugin
// framework that the response was already written.
this.getResponse = function(data, req, res) {
  try {
    const prompt = data.prompt;
    if (!prompt) {
      return { error: 'No prompt provided' };
    }

    // Simulate a response from ChatGPT
    res.jsonp({ response: `ChatGPT response to: ${prompt}` });
    return { use_callback: true };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
};
1077
+
1078
// Render the default payload as JSON text with an HTML-ish separator so it
// can be dumped straight into a page.
this.html = function(data, req, res) {
  const payload = self.default(data, req);
  return JSON.stringify(payload, '', '\t\r\n <br/>');
};
1081
+ /* NEW AI RESPONSE API*/
1082
+
1083
+ this.executeJOEAiPrompt = async function(data, req, res) {
1084
+ const referencedObjectIds = []; // Track all objects touched during helper function
1085
+ try {
1086
+ const promptId = data.ai_prompt;
1087
+ // Support both payload shapes: { ai_prompt, params:{...}, ... } and flat
1088
+ const params = (data && (data.params || data)) || {};
1089
+
1090
+ if (!promptId) {
1091
+ return { error: "Missing prompt_id." };
1092
+ }
1093
+
1094
+ const prompt = await $J.get(promptId); // Use $J.get for consistency
1095
+ if (!prompt) {
1096
+ return { error: "Prompt not found." };
1097
+ }
1098
+
1099
+ let instructions = prompt.instructions || "";
1100
+ let finalInstructions=instructions;
1101
+ let finalInput='';
1102
+ // Pre-load all content_objects if content_items exist
1103
+ const contentObjects = {};
1104
+
1105
+ if (prompt.content_items && Array.isArray(prompt.content_items)) {
1106
+ for (const content of prompt.content_items) {
1107
+ if (params[content.reference]) {
1108
+ const obj = $J.get(params[content.reference]);
1109
+ if (obj) {
1110
+ contentObjects[content.itemtype] = obj;
1111
+
1112
+ // Pre-track referenced object
1113
+ if (obj._id && !referencedObjectIds.includes(obj._id)) {
1114
+ referencedObjectIds.push(obj._id);
1115
+ }
1116
+ }
1117
+ }
1118
+ }
1119
+ }
1120
+
1121
+ // Execute any helper functions if present
1122
+ if (prompt.functions) {
1123
+ const modFunc = JOE.Utils.requireFromString(prompt.functions, prompt._id);
1124
+ const helperResult = await modFunc({
1125
+ instructions,
1126
+ params,
1127
+ ai_prompt: prompt,
1128
+ content_objects: contentObjects,
1129
+ trackObject: (obj) => {
1130
+ if (obj?._id && !referencedObjectIds.includes(obj._id)) {
1131
+ referencedObjectIds.push(obj._id);
1132
+ }
1133
+ }
1134
+ });
1135
+
1136
+ if (typeof helperResult === 'object' && helperResult.error) {
1137
+ return { error: helperResult.error };
1138
+ }
1139
+
1140
+ // Assume the result is { instructions, input }
1141
+ finalInstructions = helperResult.instructions || instructions;
1142
+ finalInput = helperResult.input;
1143
+ }
1144
+
1145
+ // Build a compact uploaded_files header from any referenced objects that
1146
+ // have uploader-style files with OpenAI ids. This gives the model
1147
+ // explicit metadata about which files were attached and their roles so
1148
+ // prompts (like MCP Tokenize Client) can reason about "transcript"
1149
+ // vs "summary" sources instead of guessing from content alone.
1150
+ let uploadedFilesMeta = [];
1151
+ try{
1152
+ Object.keys(contentObjects || {}).forEach(function(itemtype){
1153
+ const obj = contentObjects[itemtype];
1154
+ if (!obj || typeof obj !== 'object') { return; }
1155
+ Object.keys(obj).forEach(function(field){
1156
+ const val = obj[field];
1157
+ if (!Array.isArray(val)) { return; }
1158
+ val.forEach(function(f){
1159
+ if (f && f.openai_file_id) {
1160
+ uploadedFilesMeta.push({
1161
+ itemtype: itemtype,
1162
+ field: field,
1163
+ name: f.filename || '',
1164
+ role: f.file_role || null,
1165
+ openai_file_id: f.openai_file_id
1166
+ });
1167
+ }
1168
+ });
1169
+ });
1170
+ });
1171
+ }catch(_e){ /* best-effort only */ }
1172
+ if (uploadedFilesMeta.length) {
1173
+ try{
1174
+ const header = { uploaded_files: uploadedFilesMeta };
1175
+ if (finalInput && String(finalInput).trim().length) {
1176
+ finalInput = JSON.stringify({
1177
+ uploaded_files: uploadedFilesMeta,
1178
+ input: finalInput
1179
+ }, null, 2);
1180
+ } else {
1181
+ finalInput = JSON.stringify(header, null, 2);
1182
+ }
1183
+ }catch(_e){ /* if JSON.stringify fails, leave finalInput as-is */ }
1184
+ }
1185
+
1186
+ const openai = newClient(); // however your OpenAI client is created
1187
+
1188
+ // Normalize MCP options from the ai_prompt record.
1189
+ const mcpEnabled = !!prompt.mcp_enabled;
1190
+ const mcpToolset = prompt.mcp_toolset || 'read-only';
1191
+ const mcpSelected = Array.isArray(prompt.mcp_selected_tools) ? prompt.mcp_selected_tools : null;
1192
+ const mcpInstructionsMode = prompt.mcp_instructions_mode || 'auto';
1193
+
1194
+ // If MCP is enabled, prefer Responses+tools via runWithTools. Otherwise,
1195
+ // keep the existing single-call Responses behavior using prompt.tools.
1196
+ let response;
1197
+ let resolvedToolNames = null;
1198
+ let mcpToolCalls = [];
1199
+ if (mcpEnabled) {
1200
+ // Determine tool names from the configured toolset + overrides.
1201
+ const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
1202
+ resolvedToolNames = toolNames;
1203
+ const toolsForModel = MCP.getToolDefinitions(toolNames);
1204
+
1205
+ // Build per-tool MCP instructions (short) and append to the existing instructions.
1206
+ const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
1207
+ const systemText = [finalInstructions || instructions || '']
1208
+ .concat(mcpText ? ['\n', mcpText] : [])
1209
+ .join('\n')
1210
+ .trim();
1211
+
1212
+ const messages = [];
1213
+ if (finalInput && String(finalInput).trim().length) {
1214
+ messages.push({ role:'user', content:String(finalInput) });
1215
+ }
1216
+ // Ensure the Responses API always has some input when MCP is enabled.
1217
+ // For prompts that rely purely on system instructions, synthesize a
1218
+ // minimal user turn so the call remains valid.
1219
+ if (!messages.length) {
1220
+ messages.push({
1221
+ role: 'user',
1222
+ content: 'Follow the system instructions above and produce the requested output.'
1223
+ });
1224
+ }
1225
+
1226
+ const runResult = await runWithTools({
1227
+ openai: openai,
1228
+ model: prompt.ai_model || "gpt-4o",
1229
+ systemText: systemText,
1230
+ messages: messages,
1231
+ // Provide a synthetic assistant-style object so runWithTools can
1232
+ // normalize tools into Responses format.
1233
+ assistant: { tools: toolsForModel },
1234
+ // Pass through attachments so MCP runs see the same files as
1235
+ // non‑MCP prompts (direct or file_search modes).
1236
+ attachments_mode: prompt.attachments_mode || 'direct',
1237
+ openai_file_ids: Array.isArray(data.openai_file_ids) ? data.openai_file_ids : null,
1238
+ req: req
1239
+ });
1240
+ response = runResult.response;
1241
+ if (runResult && Array.isArray(runResult.toolCalls)) {
1242
+ mcpToolCalls = runResult.toolCalls.map(function(tc){
1243
+ return {
1244
+ name: tc && (tc.name || tc.function_name || tc.tool_name),
1245
+ arguments: tc && tc.arguments
1246
+ };
1247
+ }).filter(function(x){ return x && x.name; });
1248
+ }
1249
+ } else {
1250
+ const payloadBase = {
1251
+ model: prompt.ai_model || "gpt-4o",
1252
+ instructions: finalInstructions||instructions, // string only
1253
+ input:finalInput||'',
1254
+ tools: prompt.tools || [{ "type": "web_search" }],
1255
+ tool_choice: prompt.tool_choice || "auto",
1256
+ temperature: prompt.temperature ? parseFloat(prompt.temperature) : 0.7,
1257
+ //return_token_usage: true
1258
+ //max_tokens: prompt.max_tokens ?? 1200
1259
+ };
1260
+ coloredLog(`${payloadBase.model} and ${payloadBase.temperature}`);
1261
+ const mode = (prompt.attachments_mode || 'direct');
1262
+ let payload = payloadBase;
1263
+ if (Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
1264
+ try{
1265
+ payload = await attachFilesToResponsesPayload(openai, payloadBase, {
1266
+ attachments_mode: mode,
1267
+ openai_file_ids: data.openai_file_ids
1268
+ });
1269
+ }catch(e){
1270
+ console.warn('[chatgpt] attachFilesToResponsesPayload failed; continuing without attachments', e && e.message || e);
1271
+ }
1272
+ }
1273
+ response = await safeResponsesCreate(openai, payload);
1274
+ }
1275
+
1276
+
1277
+ // const payload = createResponsePayload(prompt, params, instructions, data.user_prompt);
1278
+
1279
+ // const response = await openai.chat.completions.create(payload);
1280
+
1281
+ const saved = await saveAiResponseRefactor({
1282
+ prompt,
1283
+ ai_response_content: response.output_text || "",
1284
+ user_prompt: finalInput || '',
1285
+ params,
1286
+ referenced_object_ids: referencedObjectIds,
1287
+ response_id:response.id,
1288
+ usage: response.usage || {},
1289
+ user: req && req.User,
1290
+ ai_assistant_id: data.ai_assistant_id,
1291
+ mcp_enabled: mcpEnabled,
1292
+ mcp_toolset: mcpToolset,
1293
+ mcp_selected_tools: resolvedToolNames || (Array.isArray(mcpSelected) ? mcpSelected : []),
1294
+ mcp_instructions_mode: mcpInstructionsMode,
1295
+ mcp_tools_used: mcpToolCalls
1296
+ });
1297
+ try{
1298
+ if (saved && Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
1299
+ saved.used_openai_file_ids = data.openai_file_ids.slice(0,10);
1300
+ await new Promise(function(resolve){
1301
+ JOE.Storage.save(saved,'ai_response',function(){ resolve(); },{ user: req && req.User, history:false });
1302
+ });
1303
+ }
1304
+ }catch(_e){}
1305
+
1306
+ return { success: true, ai_response_id: saved._id,response:response.output_text || "",usage:response.usage };
1307
+ } catch (e) {
1308
+ console.error('❌ executeJOEAiPrompt error:', e);
1309
+ return { error: "Failed to execute AI prompt.",message: e.message };
1310
+ }
1311
+ };
1312
+
1313
/**
 * createResponsePayload
 *
 * Build a legacy Chat Completions payload from a JOE ai_prompt record.
 * NOTE: currently unreferenced — the call sites above are commented out —
 * kept for the legacy `chat.completions` path.
 *
 * @param {Object} prompt       ai_prompt record (ai_model/model, tools, tool_choice, temperature, max_tokens)
 * @param {Object} params       unused here; kept for signature parity with the old call sites
 * @param {string} instructions system text sent as the `system` message
 * @param {string} user_prompt  user text; defaults to "" when falsy
 * @returns {Object} payload suitable for `openai.chat.completions.create`
 */
function createResponsePayload(prompt, params, instructions, user_prompt) {
  // Prefer `ai_model` for consistency with the rest of this plugin
  // (executeJOEAiPrompt and saveAiResponseRefactor both read prompt.ai_model),
  // while still honoring the legacy `model` field this helper used to read.
  const model = prompt.ai_model || prompt.model || "gpt-4o";
  // Temperature may be stored as a string on the prompt record (the live
  // Responses path runs it through parseFloat); coerce so the API always
  // receives a number, falling back to 0.7 for missing/unparsable values.
  let temperature = 0.7;
  if (prompt.temperature !== null && prompt.temperature !== undefined) {
    const parsed = parseFloat(prompt.temperature);
    if (!Number.isNaN(parsed)) { temperature = parsed; }
  }
  return {
    model: model,
    messages: [
      { role: "system", content: instructions },
      { role: "user", content: user_prompt || "" }
    ],
    tools: prompt.tools || undefined,
    tool_choice: prompt.tool_choice || "auto",
    temperature: temperature,
    max_tokens: prompt.max_tokens ?? 1200
  };
}
1326
/**
 * saveAiResponseRefactor
 *
 * Persist an `ai_response` record for a completed prompt run and return the
 * saved object. Best-effort parses the response text as JSON (populating
 * `response_keys` and `response_json`) and, when MCP was enabled for the run,
 * attaches MCP metadata to the record.
 *
 * @returns {Promise<Object>} the `ai_response` record that was saved
 * @throws when JOE.Storage.save reports an error
 */
async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids, response_id, usage, user, ai_assistant_id, mcp_enabled, mcp_toolset, mcp_selected_tools, mcp_instructions_mode, mcp_tools_used }) {
  // Top-level keys when the raw content is strict JSON; [] otherwise.
  let topLevelKeys = [];
  try {
    topLevelKeys = Object.keys(JSON.parse(ai_response_content));
  } catch (e) {
    console.error('❌ Error parsing AI response content for keys:', e);
  }

  // Best-effort parse into JSON for downstream agents (Thought pipeline, etc.)
  let jsonBody = null;
  try {
    const candidate = extractJsonText(ai_response_content);
    jsonBody = candidate ? JSON.parse(candidate) : null;
  } catch (_e) {
    jsonBody = null;
  }

  // Attribute the record to the assistant when one drove the run,
  // otherwise to the requesting user (when known).
  let creatorType = null;
  let creatorId = null;
  try {
    if (ai_assistant_id) {
      creatorType = 'ai_assistant';
      creatorId = ai_assistant_id;
    } else if (user && user._id) {
      creatorType = 'user';
      creatorId = user._id;
    }
  } catch (_e) {}

  const record = {
    name: `${prompt.name}`,
    itemtype: 'ai_response',
    ai_prompt: prompt._id,
    prompt_name: prompt.name,
    prompt_method: prompt.prompt_method,
    response: ai_response_content,
    response_json: jsonBody,
    response_keys: topLevelKeys,
    response_id: response_id || '',
    user_prompt: user_prompt,
    params_used: params,
    usage: usage || {},
    tags: prompt.tags || [],
    model_used: prompt.ai_model || "gpt-4o",
    referenced_objects: referenced_object_ids, // new flexible array of referenced object ids
    created: new Date().toISOString(),
    _id: cuid(),
    creator_type: creatorType,
    creator_id: creatorId
  };

  // Only attach MCP metadata when MCP was actually enabled for this run, to
  // avoid introducing nulls into history diffs.
  try {
    if (mcp_enabled) {
      record.mcp_enabled = true;
      if (mcp_toolset) { record.mcp_toolset = mcp_toolset; }
      if (Array.isArray(mcp_selected_tools) && mcp_selected_tools.length) {
        record.mcp_selected_tools = mcp_selected_tools;
      }
      if (mcp_instructions_mode) {
        record.mcp_instructions_mode = mcp_instructions_mode;
      }
      if (Array.isArray(mcp_tools_used) && mcp_tools_used.length) {
        record.mcp_tools_used = mcp_tools_used;
      }
    }
  } catch (_e) {}

  // Persist and surface storage failures to the caller.
  await new Promise(function (resolve, reject) {
    JOE.Storage.save(record, 'ai_response', function (err, result) {
      if (err) {
        console.error('❌ Error saving AI response:', err);
        reject(err);
        return;
      }
      console.log('✅ AI response saved successfully');
      resolve(result);
    });
  });

  return record;
}
1407
+
1408
+ // ---------- Widget chat endpoints (Responses API + optional assistants) ----------
1409
/**
 * normalizeMessages
 *
 * Normalize a stored widget-conversation message list into the
 * { role, content, created_at } shape the `<joe-ai-widget>` client expects.
 * Non-array input yields []. Null/undefined entries are skipped so a single
 * corrupt stored message cannot throw and break the whole history load
 * (previously `m.role` on a null entry raised a TypeError).
 *
 * @param {Array<Object>} messages raw messages from storage
 * @returns {Array<{role:string, content:*, created_at:string}>}
 */
function normalizeMessages(messages) {
  if (!Array.isArray(messages)) { return []; }
  return messages
    .filter(function (m) { return m !== null && m !== undefined; })
    .map(function (m) {
      return {
        role: m.role || 'assistant',
        content: m.content || '',
        // Older records stored `created`; fall back to "now" as a last resort.
        created_at: m.created_at || m.created || new Date().toISOString()
      };
    });
}
1419
+
1420
/**
 * widgetStart
 *
 * Create and persist a new `ai_widget_conversation` record for the external
 * `<joe-ai-widget>` chat component. Storage-only — no OpenAI calls are made.
 *
 * Inputs (data):
 *   - model (optional) override model for the widget
 *   - ai_assistant_id (optional) JOE ai_assistant cuid
 *   - system (optional) explicit system text
 *   - source (optional) freeform source tag, defaults to "widget"
 *   - user_id / user_name / user_color (optional) explicit user fields
 *
 * Output:
 *   - { success, conversation_id, model, assistant_id, assistant_color,
 *       user_color } where assistant_id is the OpenAI assistant_id (if any).
 */
this.widgetStart = async function (data, req, res) {
  try {
    const body = data || {};
    // Default to a modern chat model when none is supplied; if an assistant
    // is attached, its ai_model takes precedence below.
    const fallbackModel = body.model || "gpt-5.1";
    const assistant = body.ai_assistant_id ? $J.get(body.ai_assistant_id) : null;
    const systemText = body.system || (assistant && assistant.instructions) || "";

    // Widget endpoints use only explicit user fields sent by the client
    // (never req.User) so there is a single, explicit source of truth.
    let user = null;
    if (body.user_id || body.user_name || body.user_color) {
      user = {
        _id: body.user_id,
        name: body.user_name,
        fullname: body.user_name,
        color: body.user_color
      };
    }
    const userColor = body.user_color || (user && user.color) || null;

    const now = new Date().toISOString();
    const convo = {
      _id: (typeof cuid === 'function') ? cuid() : undefined,
      itemtype: "ai_widget_conversation",
      model: (assistant && assistant.ai_model) || fallbackModel,
      assistant: assistant && assistant._id,
      assistant_id: assistant && assistant.assistant_id,
      assistant_color: assistant && assistant.assistant_color,
      user: user && user._id,
      user_name: user && (user.fullname || user.name),
      user_color: userColor,
      system: systemText,
      messages: [],
      source: body.source || "widget",
      created: now,
      joeUpdated: now
    };

    const saved = await new Promise(function (resolve, reject) {
      // Widget conversations are lightweight and do not need full history diffs.
      JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
        if (err) { reject(err); return; }
        resolve(result);
      }, { history: false });
    });

    return {
      success: true,
      conversation_id: saved._id,
      model: saved.model,
      assistant_id: saved.assistant_id || null,
      assistant_color: saved.assistant_color || null,
      user_color: saved.user_color || userColor || null
    };
  } catch (e) {
    console.error("[chatgpt] widgetStart error:", e);
    return { success: false, error: e && e.message || "Unknown error" };
  }
};
1502
+
1503
/**
 * widgetHistory
 *
 * Load an existing `ai_widget_conversation` and return its normalized
 * messages for `<joe-ai-widget>` on page load or refresh. Purely storage
 * plus normalization — no OpenAI calls.
 *
 * Inputs (data):
 *   - conversation_id or _id: the widget conversation cuid
 *
 * Output:
 *   - { success, conversation_id, model, assistant_id, assistant_color,
 *       user_color, messages }
 */
this.widgetHistory = async function (data, req, res) {
  try {
    const conversationId = data.conversation_id || data._id;
    if (!conversationId) {
      return { success: false, error: "Missing conversation_id" };
    }

    const convo = await new Promise(function (resolve, reject) {
      JOE.Storage.load("ai_widget_conversation", { _id: conversationId }, function (err, results) {
        if (err) { reject(err); return; }
        resolve(results && results[0]);
      });
    });
    if (!convo) {
      return { success: false, error: "Conversation not found" };
    }

    convo.messages = normalizeMessages(convo.messages);
    return {
      success: true,
      conversation_id: convo._id,
      model: convo.model,
      assistant_id: convo.assistant_id || null,
      assistant_color: convo.assistant_color || null,
      user_color: convo.user_color || null,
      messages: convo.messages
    };
  } catch (e) {
    console.error("[chatgpt] widgetHistory error:", e);
    return { success: false, error: e && e.message || "Unknown error" };
  }
};
1550
+
1551
/**
 * widgetMessage
 *
 * Handle a single user turn for `<joe-ai-widget>`:
 *   - Append the user message to the stored conversation.
 *   - Call OpenAI Responses (optionally with tools from the selected
 *     `ai_assistant`, via runWithTools + MCP).
 *   - Append the assistant reply, persist the conversation, and return
 *     the full message history plus the latest assistant message.
 *
 * Inputs (data):
 *   - conversation_id or _id: cuid of the widget conversation
 *   - content: user text (required)
 *   - role: user role, defaults to "user"
 *   - assistant_id: optional OpenAI assistant_id (used only to locate the
 *     JOE ai_assistant config)
 *   - model: optional model override
 *   - user_id / user_name / user_color: optional explicit user fields,
 *     used to backfill older conversation records
 *
 * OpenAI calls:
 *   - Delegated entirely to runWithTools (Responses API; may be one or two
 *     calls depending on whether the assistant has tools configured).
 *
 * Output:
 *   - { success, conversation_id, model, assistant_id, assistant_color,
 *     user_color, messages, last_message, usage }
 */
this.widgetMessage = async function (data, req, res) {
  try {
    var body = data || {};
    var conversation_id = body.conversation_id || body._id;
    var content = body.content;
    var role = body.role || "user";

    if (!conversation_id || !content) {
      return { success: false, error: "Missing conversation_id or content" };
    }

    // Load the stored conversation; results[0] because _id lookup is unique.
    const convo = await new Promise(function (resolve, reject) {
      JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
        if (err) return reject(err);
        resolve(results && results[0]);
      });
    });
    if (!convo) {
      return { success: false, error: "Conversation not found" };
    }

    convo.messages = normalizeMessages(convo.messages);
    const nowIso = new Date().toISOString();

    // Append user message
    const userMsg = { role: role, content: content, created_at: nowIso };
    convo.messages.push(userMsg);

    // Backfill user metadata (id/name/color) on older conversations that
    // were created before we started storing these fields. Prefer explicit
    // body fields only; we no longer infer from req.User so that widget
    // calls always have a single, explicit user source.
    var u = null;
    if (body.user_id || body.user_name || body.user_color) {
      u = {
        _id: body.user_id,
        name: body.user_name,
        fullname: body.user_name,
        color: body.user_color
      };
    }
    if (u) {
      // Only fill fields that are currently missing — never overwrite.
      if (!convo.user && u._id) {
        convo.user = u._id;
      }
      if (!convo.user_name && (u.fullname || u.name)) {
        convo.user_name = u.fullname || u.name;
      }
      if (!convo.user_color && u.color) {
        convo.user_color = u.color;
      }
    }

    const assistantId = body.assistant_id || convo.assistant_id || null;
    // NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
    // We do NOT pass assistant_id to the Responses API (it is not supported in the
    // version we are using); instead we look up the JOE ai_assistant by assistant_id
    // and inject its configuration (model, instructions, tools) into the request.
    var assistantObj = null;
    if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
      assistantObj = JOE.Data.ai_assistant.find(function (a) {
        return a && a.assistant_id === assistantId;
      }) || null;
    }
    const openai = newClient();
    // Model precedence: assistant config > stored conversation > request body.
    const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";

    // Prefer explicit system text on the conversation, then assistant instructions.
    const systemText = (convo.system && String(convo.system)) ||
      (assistantObj && assistantObj.instructions) ||
      "";
    // Strip storage-only fields (created_at, meta) before sending to the model.
    const messagesForModel = convo.messages.map(function (m) {
      return { role: m.role, content: m.content };
    });

    // Use runWithTools so that, when an assistant has tools configured,
    // we let the model call those tools via MCP before generating a
    // final response.
    const runResult = await runWithTools({
      openai: openai,
      model: model,
      systemText: systemText,
      messages: messagesForModel,
      assistant: assistantObj,
      req: req
    });

    // If tools were called this turn, inject a small meta message so the
    // widget clearly shows which functions ran before the assistant reply.
    if (runResult.toolCalls && runResult.toolCalls.length) {
      const names = runResult.toolCalls.map(function (tc) { return tc && tc.name; })
        .filter(Boolean)
        .join(', ');
      convo.messages.push({
        role: "assistant",
        meta: "tools_used",
        content: "[Tools used this turn: " + names + "]",
        created_at: nowIso
      });
    }

    // Append the assistant reply and bump the conversation timestamps.
    const assistantText = runResult.finalText || "";
    const assistantMsg = {
      role: "assistant",
      content: assistantText,
      created_at: new Date().toISOString()
    };
    convo.messages.push(assistantMsg);
    convo.last_message_at = assistantMsg.created_at;
    convo.joeUpdated = assistantMsg.created_at;

    await new Promise(function (resolve, reject) {
      // Skip history for widget conversations to avoid heavy diffs / craydent.equals issues.
      JOE.Storage.save(convo, "ai_widget_conversation", function (err, saved) {
        if (err) return reject(err);
        resolve(saved);
      }, { history: false });
    });

    return {
      success: true,
      conversation_id: convo._id,
      model: model,
      assistant_id: assistantId,
      assistant_color: (assistantObj && assistantObj.assistant_color) || convo.assistant_color || null,
      user_color: convo.user_color || ((u && u.color) || null),
      messages: convo.messages,
      last_message: assistantMsg,
      // Usage comes from the underlying Responses call inside runWithTools.
      usage: (runResult.response && runResult.response.usage) || {}
    };
  } catch (e) {
    console.error("[chatgpt] widgetMessage error:", e);
    return { success: false, error: e && e.message || "Unknown error" };
  }
};
1716
+
1717
// Mark async plugin methods so Server.pluginHandling will await them.
this.async = {
  executeJOEAiPrompt: this.executeJOEAiPrompt,
  testPrompt: this.testPrompt,
  sendInitialConsultTranscript: this.sendInitialConsultTranscript,
  widgetStart: this.widgetStart,
  widgetHistory: this.widgetHistory,
  widgetMessage: this.widgetMessage,
  autofill: this.autofill,
  filesRetryFromUrl: this.filesRetryFromUrl
};
// BUG FIX: was `[,'testPrompt']` — the leading comma made a sparse array
// whose hole reads back as `undefined` (so e.g. `includes(undefined)` was
// true), which could confuse protection checks. Only 'testPrompt' is
// intended to be protected.
this.protected = ['testPrompt'];
1729
+ return self;
1730
+ }
1731
+
1732
// Export a shared singleton instance; the JOE server dispatches plugin
// routes against this instance's methods.
module.exports = new ChatGPT();