json-object-editor 0.10.654 → 0.10.660

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,1495 +1,1864 @@
1
- const OpenAI = require("openai");
2
- const { google } = require('googleapis');
3
- const path = require('path');
4
- const os = require('os');
5
- const fs = require('fs');
6
- const MCP = require("../modules/MCP.js");
7
- // const { name } = require("json-object-editor/server/webconfig");
8
-
9
- function ChatGPT() {
10
- // const fetch = (await import('node-fetch')).default;
11
- //const openai = new OpenAI();
12
- // Load the service account key JSON file
13
- const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
14
- const google_auth = new google.auth.GoogleAuth({
15
- keyFile: serviceAccountKeyFile,
16
- scopes: ['https://www.googleapis.com/auth/documents.readonly'],
17
- });
18
-
19
- var self = this;
20
- this.async ={};
21
- function coloredLog(message){
22
- console.log(JOE.Utils.color('[chatgpt]', 'plugin', false), message);
23
- }
24
- //xx -setup and send a test prompt to chatgpt
25
- //xx get the api key from joe settings
26
-
27
- //get a prompt from id
28
- //send the prompt to chatgpt
29
-
30
- //++get the cotnent of a file
31
- //++send the content of a file to chatgpt
32
-
33
- //++ structure data
34
- //++ save the response to an ai_repsonse
35
- //create an ai_response
36
- //store the content
37
- //attach to the request
38
- //store ids sent with the request
39
- this.default = function(data, req, res) {
40
- try {
41
- var payload = {
42
- params: req.params,
43
- data: data
44
- };
45
- } catch (e) {
46
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
47
- }
48
- return payload;
49
- };
50
- function getAPIKey() {
51
- const setting = JOE.Utils.Settings('OPENAI_API_KEY');
52
- if (!setting) throw new Error("Missing OPENAI_API_KEY setting");
53
- return setting;
54
- }
55
- function getSchemaDef(name) {
56
- if (!name) return { full: null, summary: null };
57
- const full = JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[name];
58
- const summary = JOE.Schemas && JOE.Schemas.summary && JOE.Schemas.summary[name];
59
- return { full, summary };
60
- }
61
- /**
62
- * callMCPTool
63
- *
64
- * Small, well‑scoped helper to invoke a JOE MCP tool directly in‑process,
65
- * without going over HTTP or worrying about POST size limits.
66
- *
67
- * Usage:
68
- * const result = await callMCPTool('listSchemas', {}, { req });
69
- *
70
- * Notes:
71
- * - `toolName` must exist on MCP.tools.
72
- * - `params` should be a plain JSON-serializable object.
73
- * - `ctx` is optional and can pass `{ req }` or other context that MCP
74
- * tools might want (for auth, user, etc.).
75
- */
76
- async function callMCPTool(toolName, params = {}, ctx = {}) {
77
- if (!MCP || !MCP.tools) {
78
- throw new Error("MCP module not initialized; cannot call MCP tool");
79
- }
80
- if (!toolName || typeof toolName !== 'string') {
81
- throw new Error("Missing or invalid MCP tool name");
82
- }
83
- const fn = MCP.tools[toolName];
84
- if (typeof fn !== 'function') {
85
- throw new Error(`MCP tool "${toolName}" not found`);
86
- }
87
- try {
88
- // All MCP tools accept (params, ctx) and return a JSON-serializable result.
89
- // The Responses / tools API often returns arguments as a JSON string, so
90
- // normalize that here before invoking the tool.
91
- let toolParams = params;
92
- if (typeof toolParams === 'string') {
93
- try {
94
- toolParams = JSON.parse(toolParams);
95
- } catch (parseErr) {
96
- console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, toolParams);
97
- // Fall back to passing the raw string so tools that expect it still work.
98
- }
99
- }
100
- const result = await fn(toolParams || {}, ctx || {});
101
- return result;
102
- } catch (e) {
103
- // Surface a clean error upstream but keep details in logs.
104
- console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
105
- throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
106
- }
107
- }
108
-
109
- /**
110
- * extractToolCalls
111
- *
112
- * Best-effort parser for tool calls from a Responses API result.
113
- * The Responses output shape may evolve; this function looks for
114
- * any "tool_call" typed content in response.output[*].content[*]
115
- * and normalizes it into `{ name, arguments }` objects.
116
- */
117
- function extractToolCalls(response) {
118
- var calls = [];
119
- if (!response || !Array.isArray(response.output)) { return calls; }
120
-
121
- response.output.forEach(function (item) {
122
- if (!item) { return; }
123
- // v1-style: item.type === 'tool_call'
124
- if (item.type === 'function_call') {
125
- calls.push({
126
- name: item.name || item.function_name,
127
- arguments: item.arguments || item.function_arguments || {}
128
- });
129
- }
130
- // message-style: item.content is an array of parts
131
- if (Array.isArray(item.content)) {
132
- item.content.forEach(function (part) {
133
- if (!part) { return; }
134
- if (part.type === 'function_call') {
135
- calls.push({
136
- name: part.name || part.tool_name,
137
- arguments: part.arguments || part.args || {}
138
- });
139
- }
140
- });
141
- }
142
- });
143
-
144
- return calls;
145
- }
146
-
147
- // Detect "request too large / token limit" style errors from the Responses API.
148
- function isTokenLimitError(err) {
149
- if (!err || typeof err !== 'object') return false;
150
- if (err.status !== 429 && err.status !== 400) return false;
151
- const msg = (err.error && err.error.message) || err.message || '';
152
- if (!msg) return false;
153
- const lower = String(msg).toLowerCase();
154
- // Cover common phrasing from OpenAI for context/TPM limits.
155
- return (
156
- lower.includes('request too large') ||
157
- lower.includes('too many tokens') ||
158
- lower.includes('max tokens') ||
159
- lower.includes('maximum context length') ||
160
- lower.includes('tokens per min')
161
- );
162
- }
163
-
164
- // Create a compact representation of a JOE object for use in slim payloads.
165
- function slimJOEObject(item) {
166
- if (!item || typeof item !== 'object') return item;
167
- const name = item.name || item.title || item.label || item.email || item.slug || item._id || '';
168
- const info = item.info || item.description || item.summary || '';
169
- return {
170
- _id: item._id,
171
- itemtype: item.itemtype,
172
- name: name,
173
- info: info
174
- };
175
- }
176
-
177
- // Given an `understandObject` result, produce a slimmed version:
178
- // - keep `object` as-is
179
- // - keep `flattened` for the main object (depth-limited) if present
180
- // - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
181
- // - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
182
- function slimUnderstandObjectResult(result) {
183
- if (!result || typeof result !== 'object') return result;
184
- const out = {
185
- _id: result._id,
186
- itemtype: result.itemtype,
187
- object: result.object,
188
- // retain main flattened view if available; this is typically much smaller
189
- flattened: result.flattened || null,
190
- schemas: result.schemas || {},
191
- tags: result.tags || {},
192
- statuses: result.statuses || {},
193
- slim: true
194
- };
195
- if (Array.isArray(result.related)) {
196
- out.related = result.related.map(function (rel) {
197
- if (!rel) return rel;
198
- const base = rel.object || {};
199
- const slim = slimJOEObject(base);
200
- return {
201
- field: rel.field,
202
- _id: slim && slim._id || rel._id,
203
- itemtype: slim && slim.itemtype || rel.itemtype,
204
- object: slim
205
- };
206
- });
207
- } else {
208
- out.related = [];
209
- }
210
- return out;
211
- }
212
-
213
- // Walk the messages array and, for any system message containing a JSON payload
214
- // of the form { "tool": "understandObject", "result": {...} }, replace the
215
- // result with a slimmed version to reduce token count. Returns a new array; if
216
- // nothing was changed, returns the original array.
217
- function shrinkUnderstandObjectMessagesForTokens(messages) {
218
- if (!Array.isArray(messages)) return messages;
219
- let changed = false;
220
- const shrunk = messages.map(function (msg) {
221
- if (!msg || msg.role !== 'system') return msg;
222
- if (typeof msg.content !== 'string') return msg;
223
- try {
224
- const parsed = JSON.parse(msg.content);
225
- if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
226
- return msg;
227
- }
228
- const slimmed = slimUnderstandObjectResult(parsed.result);
229
- changed = true;
230
- return {
231
- ...msg,
232
- content: JSON.stringify({ tool: 'understandObject', result: slimmed })
233
- };
234
- } catch (_e) {
235
- return msg;
236
- }
237
- });
238
- return changed ? shrunk : messages;
239
- }
240
-
241
- /**
242
- * runWithTools
243
- *
244
- * Single orchestration function for calling the OpenAI Responses API
245
- * with optional tools (sourced from a JOE `ai_assistant`), handling
246
- * tool calls via MCP, and issuing a follow-up model call with the
247
- * tool results injected.
248
- *
249
- * Inputs (opts):
250
- * - openai: OpenAI client instance
251
- * - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
252
- * - systemText: string of system / instructions text
253
- * - messages: array of { role, content } for the conversation so far
254
- * - assistant: JOE `ai_assistant` object (may contain `tools`)
255
- * - req: Express request (passed into MCP tools as context)
256
- *
257
- * Returns:
258
- * - { response, finalText, messages, toolCalls }
259
- * where `finalText` is the assistant-facing text (from output_text)
260
- * and `messages` is the possibly-extended message list including
261
- * any synthetic `tool` messages.
262
- */
263
- async function runWithTools(opts) {
264
- const openai = opts.openai;
265
- const model = opts.model;
266
- const systemText = opts.systemText || "";
267
- const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
268
- const assistant = opts.assistant || null;
269
- const req = opts.req;
270
-
271
- // Normalize tools: in many schemas tools may be stored as a JSON string;
272
- // here we accept either an array or a JSON-stringified array.
273
- let tools = null;
274
- if (assistant && assistant.tools) {
275
- if (Array.isArray(assistant.tools)) {
276
- tools = assistant.tools;
277
- } else if (typeof assistant.tools === 'string') {
278
- try {
279
- const parsed = JSON.parse(assistant.tools);
280
- if (Array.isArray(parsed)) {
281
- tools = parsed;
282
- }
283
- } catch (e) {
284
- console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
285
- }
286
- }
287
- }
288
- // Normalize tool definitions for the Responses API. The assistant UI
289
- // uses the Assistants-style shape ({ type:'function', function:{...} }),
290
- // but Responses expects the name/description/parameters at the top level:
291
- // { type:'function', name:'x', description:'...', parameters:{...} }
292
- if (Array.isArray(tools)) {
293
- tools = tools.map(function (t) {
294
- if (t && t.type === 'function' && t.function && !t.name) {
295
- const fn = t.function || {};
296
- return {
297
- type: 'function',
298
- name: fn.name,
299
- description: fn.description,
300
- parameters: fn.parameters || {}
301
- };
302
- }
303
- return t;
304
- });
305
- }
306
-
307
- // No tools configured – do a simple single Responses call.
308
- if (!tools) {
309
- const resp = await openai.responses.create({
310
- model: model,
311
- instructions: systemText,
312
- input: messages
313
- });
314
- return {
315
- response: resp,
316
- finalText: resp.output_text || "",
317
- messages: messages,
318
- toolCalls: []
319
- };
320
- }
321
-
322
- // Step 1: call the model with tools enabled.
323
- const first = await openai.responses.create({
324
- model: model,
325
- instructions: systemText,
326
- input: messages,
327
- tools: tools,
328
- tool_choice: "auto"
329
- });
330
-
331
- const toolCalls = extractToolCalls(first);
332
-
333
- // If the model didn't decide to use tools, just return the first answer.
334
- if (!toolCalls.length) {
335
- return {
336
- response: first,
337
- finalText: first.output_text || "",
338
- messages: messages,
339
- toolCalls: []
340
- };
341
- }
342
-
343
- // Step 2: execute each tool call via MCP and append tool results.
344
- for (let i = 0; i < toolCalls.length; i++) {
345
- const tc = toolCalls[i];
346
- try {
347
- const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
348
- messages.push({
349
- // Responses API does not support a "tool" role in messages.
350
- // We inject tool outputs as a synthetic system message so
351
- // the model can see the results without affecting the
352
- // user/assistant turn structure.
353
- role: "system",
354
- content: JSON.stringify({ tool: tc.name, result: result })
355
- });
356
- } catch (e) {
357
- console.error("[chatgpt] MCP tool error in runWithTools:", e);
358
- messages.push({
359
- role: "system",
360
- content: JSON.stringify({
361
- tool: tc.name,
362
- error: e && e.message || "Tool execution failed"
363
- })
364
- });
365
- }
366
- }
367
-
368
- // Step 3: ask the model again with tool outputs included.
369
- let finalMessages = messages;
370
- let second;
371
- try {
372
- second = await openai.responses.create({
373
- model: model,
374
- instructions: systemText,
375
- input: finalMessages
376
- });
377
- } catch (e) {
378
- if (isTokenLimitError(e)) {
379
- console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
380
- const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
381
- // If nothing was shrunk, just rethrow the original error.
382
- if (shrunk === finalMessages) {
383
- throw e;
384
- }
385
- finalMessages = shrunk;
386
- // Retry once with the smaller payload; let any error bubble up.
387
- second = await openai.responses.create({
388
- model: model,
389
- instructions: systemText,
390
- input: finalMessages
391
- });
392
- } else {
393
- throw e;
394
- }
395
- }
396
-
397
- return {
398
- response: second,
399
- finalText: second.output_text || "",
400
- messages: finalMessages,
401
- toolCalls: toolCalls
402
- };
403
- }
404
-
405
- // function newClient(){
406
- // var key = getAPIKey();
407
- // var c = new OpenAI({
408
- // apiKey: key, // This is the default and can be omitted
409
- // });
410
- // if(!c || !c.apiKey){
411
- // return { errors: 'No API key provided' };
412
- // }
413
- // return c;
414
- // }
415
- function newClient() {
416
- return new OpenAI({ apiKey: getAPIKey() });
417
- }
418
-
419
- // Safely call Responses API with optional temperature/top_p.
420
- // If the model rejects these parameters, strip and retry once.
421
- async function safeResponsesCreate(openai, payload){
422
- try{
423
- return await openai.responses.create(payload);
424
- }catch(e){
425
- try{
426
- var msg = (e && (e.error && e.error.message) || e.message || '').toLowerCase();
427
- var badTemp = msg.includes("unsupported parameter") && msg.includes("temperature");
428
- var badTopP = msg.includes("unsupported parameter") && msg.includes("top_p");
429
- var unknownTemp = msg.includes("unknown parameter") && msg.includes("temperature");
430
- var unknownTopP = msg.includes("unknown parameter") && msg.includes("top_p");
431
- if (badTemp || badTopP || unknownTemp || unknownTopP){
432
- var p2 = Object.assign({}, payload);
433
- if (p2.hasOwnProperty('temperature')) delete p2.temperature;
434
- if (p2.hasOwnProperty('top_p')) delete p2.top_p;
435
- console.warn('[chatgpt] Retrying without temperature/top_p due to model rejection');
436
- return await openai.responses.create(p2);
437
- }
438
- }catch(_e){ /* fallthrough */ }
439
- throw e;
440
- }
441
- }
442
-
443
- // Ensure a vector store exists with the provided file_ids indexed; returns { vectorStoreId }
444
- async function ensureVectorStoreForFiles(fileIds = []){
445
- const openai = newClient();
446
- // Create ephemeral store per run (could be optimized to reuse/persist later)
447
- const vs = await openai.vectorStores.create({ name: 'JOE Prompt Run '+Date.now() });
448
- const storeId = vs.id;
449
- // Link files by id
450
- for (const fid of (fileIds||[]).slice(0,10)) {
451
- try{
452
- await openai.vectorStores.files.create(storeId, { file_id: fid });
453
- }catch(e){
454
- console.warn('[chatgpt] vectorStores.files.create failed for', fid, e && e.message || e);
455
- }
456
- }
457
- // Poll (best-effort) until files are processed or timeout
458
- const timeoutMs = 8000;
459
- const start = Date.now();
460
- try{
461
- while(Date.now() - start < timeoutMs){
462
- const listed = await openai.vectorStores.files.list(storeId, { limit: 100 });
463
- const items = (listed && listed.data) || [];
464
- const pending = items.some(f => f.status && f.status !== 'completed');
465
- if(!pending){ break; }
466
- await new Promise(r => setTimeout(r, 500));
467
- }
468
- }catch(_e){ /* non-fatal */ }
469
- return { vectorStoreId: storeId };
470
- }
471
-
472
- // ---------------- OpenAI Files helpers ----------------
473
- async function uploadFileFromBuffer(buffer, filename, contentType, purpose) {
474
- const openai = newClient();
475
- const usePurpose = purpose || 'assistants';
476
- const tmpDir = os.tmpdir();
477
- const safeName = filename || ('upload_' + Date.now());
478
- const tmpPath = path.join(tmpDir, safeName);
479
- await fs.promises.writeFile(tmpPath, buffer);
480
- try {
481
- // openai.files.create accepts a readable stream
482
- const fileStream = fs.createReadStream(tmpPath);
483
- const created = await openai.files.create({
484
- purpose: usePurpose,
485
- file: fileStream
486
- });
487
- return { id: created.id, purpose: usePurpose };
488
- } finally {
489
- // best-effort cleanup
490
- fs.promises.unlink(tmpPath).catch(()=>{});
491
- }
492
- }
493
-
494
- // Expose a helper that other plugins can call in-process
495
- this.filesUploadFromBufferHelper = async function ({ buffer, filename, contentType, purpose }) {
496
- if (!buffer || !buffer.length) {
497
- throw new Error('Missing buffer');
498
- }
499
- return await uploadFileFromBuffer(buffer, filename, contentType, purpose || 'assistants');
500
- };
501
-
502
- // Public endpoint to retry OpenAI upload from a URL (e.g., S3 object URL)
503
- this.filesRetryFromUrl = async function (data, req, res) {
504
- try {
505
- const { default: got } = await import('got');
506
- const url = data && (data.url || data.location);
507
- const filename = data && data.filename || (url && url.split('/').pop()) || ('upload_' + Date.now());
508
- const contentType = data && data.contentType || undefined;
509
- const purpose = 'assistants';
510
- if (!url) {
511
- return { success: false, error: 'Missing url' };
512
- }
513
- const resp = await got(url, { responseType: 'buffer' });
514
- const buffer = resp.body;
515
- const created = await uploadFileFromBuffer(buffer, filename, contentType, purpose);
516
- return { success: true, openai_file_id: created.id, openai_purpose: created.purpose };
517
- } catch (e) {
518
- return { success: false, error: e && e.message || 'Retry upload failed' };
519
- }
520
- };
521
- this.testPrompt= async function(data, req, res) {
522
- try {
523
- var payload = {
524
- params: req.params,
525
- data: data
526
- };
527
- } catch (e) {
528
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
529
- }
530
- const client = newClient();
531
- if(client.errors){
532
- return { errors: client.errors };
533
- }
534
- try {
535
- const chatCompletion = await client.chat.completions.create({
536
- messages: [{ role: 'user', content: 'Tell me a story about JOE: the json object editor in under 256 chars.' }],
537
- model: 'gpt-4o',
538
- });
539
- coloredLog(chatCompletion);
540
- const text = chatCompletion.choices && chatCompletion.choices[0] && chatCompletion.choices[0].message && chatCompletion.choices[0].message.content || '';
541
- // Optionally persist as ai_response with parsed JSON when applicable
542
- const parsed = (function(){
543
- try {
544
- const jt = extractJsonText(text);
545
- return jt ? JSON.parse(jt) : null;
546
- } catch(_e){ return null; }
547
- })();
548
- try {
549
- var creator_type = null;
550
- var creator_id = null;
551
- try{
552
- var u = req && req.User;
553
- if (u && u._id){
554
- creator_type = 'user';
555
- creator_id = u._id;
556
- }
557
- }catch(_e){}
558
- const aiResponse = {
559
- itemtype: 'ai_response',
560
- name: 'Test Prompt → ChatGPT',
561
- response_type: 'testPrompt',
562
- response: text,
563
- response_json: parsed,
564
- response_id: chatCompletion.id || '',
565
- user_prompt: payload && payload.data && payload.data.prompt || 'Tell me a story about JOE: the json object editor in under 256 chars.',
566
- model_used: 'gpt-4o',
567
- created: (new Date()).toISOString(),
568
- creator_type: creator_type,
569
- creator_id: creator_id
570
- };
571
- JOE.Storage.save(aiResponse, 'ai_response', function(){}, { history: false, user: (req && req.User) || { name:'system' } });
572
- } catch(_e){ /* best-effort only */ }
573
- return {payload,chatCompletion,content:text};
574
- } catch (error) {
575
- if (error.status === 429) {
576
- return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
577
- } else {
578
- return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
579
- }
580
- }
581
- }
582
-
583
- this.sendInitialConsultTranscript= async function(data, req, res) {
584
- coloredLog("sendInitialConsultTranscript");
585
- //get the prompt object from the prompt id
586
- //get the business object from the refrenced object id
587
- //see if there is a initial_transcript_url property on that object
588
- //if there is, get the content of the file
589
- //send the content to chatgpt, with the template property of the prompt object
590
- //get the response
591
- try {
592
- var payload = {
593
- params: req.params,
594
- data: data
595
- };
596
- } catch (e) {
597
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
598
- }
599
- var businessOBJ = JOE.Data.business.find(b=>b._id == data.business);
600
- var promptOBJ = JOE.Data.ai_prompt.find(p=>p._id == data.ai_prompt);
601
-
602
-
603
- // See if there is an initial_transcript_url property on that object
604
- const transcriptUrl = businessOBJ.initial_transcript_url;
605
- if (!transcriptUrl) {
606
- return res.jsonp({ error: 'No initial transcript URL found' });
607
- }
608
-
609
- //Get the content of the file from Google Docs
610
- const transcriptContent = await getGoogleDocContent(transcriptUrl);
611
- if (!transcriptContent || transcriptContent.error) {
612
- return res.jsonp({ error: (transcriptContent.error && transcriptContent.error.message)||'Failed to fetch transcript content' });
613
- }
614
- const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
615
- payload.tokenCount = tokenCount;
616
- coloredLog("token count: "+tokenCount);
617
- //return res.jsonp({tokens:tokenCount,content:transcriptContent});
618
- // Send the content to ChatGPT, with the template property of the prompt object
619
- const client = new OpenAI({
620
- apiKey: getAPIKey(), // This is the default and can be omitted
621
- });
622
-
623
- const chatResponse = await client.chat.completions.create({
624
- messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
625
- model: 'gpt-4o',
626
- });
627
-
628
- // Get the response
629
- const chatContent = chatResponse.choices[0].message.content;
630
- const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
631
- // Save the response
632
- await saveAIResponse({
633
- name:responseName,
634
- business: data.business,
635
- ai_prompt: data.ai_prompt,
636
- response: chatContent,
637
- payload,
638
- prompt_method:req.params.method
639
- }, req && req.User);
640
- coloredLog("response saved -"+responseName);
641
- return {payload,
642
- businessOBJ,
643
- promptOBJ,
644
- chatContent,
645
- responseName
646
- };
647
-
648
- }
649
-
650
- async function getGoogleDocContent(docUrl) {
651
- try {
652
- const auth = new google.auth.GoogleAuth({
653
- scopes: ['https://www.googleapis.com/auth/documents.readonly']
654
- });
655
- //get google docs apikey from settings
656
- const GOOGLE_API_KEY = JOE.Utils.Settings('GOOGLE_DOCS_API_KEY');
657
- const docs = google.docs({ version: 'v1', auth:google_auth });
658
- const docId = extractDocIdFromUrl(docUrl);
659
- const doc = await docs.documents.get({ documentId: docId });
660
-
661
- let content = doc.data.body.content.map(element => {
662
- if (element.paragraph && element.paragraph.elements) {
663
- return element.paragraph.elements.map(
664
- e => e.textRun ? e.textRun.content.replace(/Euron Nicholson/g, '[EN]').replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
665
- : ''
666
- ).join('');
667
- }
668
- return '';
669
- }).join('\n');
670
-
671
- // Remove timestamps and line numbers
672
- //content = content.replace(/^\d+\n\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}\n/gm, '');
673
-
674
- return content;
675
- } catch (error) {
676
- console.error('Error fetching Google Doc content:', error);
677
- return {error};
678
- }
679
- }
680
- function countTokens(text, model = 'gpt-4o') {
681
- const enc = encoding_for_model(model);
682
- const tokens = enc.encode(text);
683
- return tokens.length;
684
- }
685
- function extractDocIdFromUrl(url) {
686
- const match = url.match(/\/d\/([a-zA-Z0-9-_]+)/);
687
- return match ? match[1] : null;
688
- }
689
-
690
- async function saveAIResponse(data, user) {
691
- try {
692
- var creator_type = null;
693
- var creator_id = null;
694
- try{
695
- if (user && user._id){
696
- creator_type = 'user';
697
- creator_id = user._id;
698
- }
699
- }catch(_e){}
700
- const aiResponse = {
701
- name: data.name,
702
- itemtype: 'ai_response',
703
- business: data.business,
704
- ai_prompt: data.ai_prompt,
705
- response: data.response,
706
- payload: data.payload,
707
- prompt_method:data.prompt_method,
708
- created: (new Date).toISOString(),
709
- _id:cuid(),
710
- creator_type: creator_type,
711
- creator_id: creator_id
712
- // Add any other fields you want to save
713
- };
714
- await new Promise((resolve, reject) => {
715
- JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
716
- if (err) {
717
- coloredLog('Error saving AI response: ' + err);
718
- reject(err);
719
- } else {
720
- coloredLog('AI response saved successfully');
721
- resolve(result);
722
- }
723
- });
724
- });
725
- } catch (error) {
726
- coloredLog('Error in saveAIResponse: ' + error);
727
- }
728
- }
729
-
730
- // Normalize model output that should contain JSON. Models often wrap JSON
731
- // in markdown fences (```json ... ```), and may prepend/append prose. This
732
- // helper strips fences and tries to isolate the first well-formed JSON
733
- // object/array substring so JSON.parse has the best chance of succeeding.
734
- function extractJsonText(raw) {
735
- if (!raw) { return ''; }
736
- let t = String(raw).trim();
737
- // If there is any ```...``` fenced block, prefer its contents.
738
- const fenceIdx = t.indexOf('```json') !== -1 ? t.indexOf('```json') : t.indexOf('```');
739
- if (fenceIdx !== -1) {
740
- let start = fenceIdx;
741
- const firstNewline = t.indexOf('\n', start);
742
- if (firstNewline !== -1) {
743
- t = t.substring(firstNewline + 1);
744
- } else {
745
- t = t.substring(start + 3);
746
- }
747
- const lastFence = t.lastIndexOf('```');
748
- if (lastFence !== -1) {
749
- t = t.substring(0, lastFence);
750
- }
751
- t = t.trim();
752
- }
753
- // If there's extra prose around the JSON, slice from first {/[ to last }/]
754
- if (t[0] !== '{' && t[0] !== '[') {
755
- const firstBrace = t.indexOf('{');
756
- const firstBracket = t.indexOf('[');
757
- let first = -1;
758
- if (firstBrace === -1) { first = firstBracket; }
759
- else if (firstBracket === -1) { first = firstBrace; }
760
- else { first = Math.min(firstBrace, firstBracket); }
761
- const lastBrace = Math.max(t.lastIndexOf('}'), t.lastIndexOf(']'));
762
- if (first !== -1 && lastBrace !== -1 && lastBrace > first) {
763
- t = t.slice(first, lastBrace + 1);
764
- }
765
- }
766
- return t.trim();
767
- }
768
-
769
- // Autofill feature (Responses API; supports assistant_id or model)
770
- this.autofill = async function (data, req, res) {
771
- const startedAt = Date.now();
772
- try {
773
- const body = data || {};
774
- const objectId = body.object_id || body._id;
775
- const object = body.object || $J.get(objectId);
776
- const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
777
- const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
778
- const rawFields = body.fields || body.field;
779
- const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
780
- const userPrompt = body.prompt || '';
781
- const assistantId = body.assistant_id || null;
782
-
783
- if (!object) {
784
- return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
785
- }
786
- if (!schemaName) {
787
- return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
788
- }
789
- if (!fields.length) {
790
- return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
791
- }
792
-
793
- const flattened = JOE.Utils.flattenObject(object._id);
794
- const systemText = [
795
- 'You are JOE (Json Object Editor) assistant.',
796
- 'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
797
- '- Respect field types (text, number, arrays, enums, references).',
798
- '- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
799
- '- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
800
- '- If a field is an array, return an array of values.',
801
- '- Never modify unrelated fields.',
802
- '- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
803
- '- If you lack sufficient information, return an empty patch.'
804
- ].join('\\n');
805
-
806
- const schemaForContext = schemaSummary || schemaFull || {};
807
- const userInput = JSON.stringify({
808
- action: 'autofill_fields',
809
- target_schema: schemaName,
810
- requested_fields: fields,
811
- user_prompt: userPrompt,
812
- object_context: flattened,
813
- schema_context: schemaForContext
814
- }, null, ' ');
815
-
816
- const openai = newClient();
817
- const model = body.model || 'gpt-4o-mini';////'gpt-5-nano';
818
-
819
- // For simplicity and robustness, use plain text output and instruct the
820
- // model to return a strict JSON object. We previously attempted the
821
- // Responses `json_schema` response_format, but the SDK shape can change
822
- // and is harder to parse reliably; text + JSON.parse is sufficient here.
823
- const requestBase = {
824
- temperature: 0.2,
825
- instructions: systemText,
826
- input: userInput
827
- };
828
- // Optional web_search tool: if the caller sets allow_web truthy, expose
829
- // the built-in web_search capability and let the model decide when to
830
- // call it.
831
- if (body.allow_web) {
832
- coloredLog("allowing web search");
833
- requestBase.tools = [{ type: 'web_search' }];
834
- requestBase.tool_choice = 'auto';
835
- }
836
-
837
- let response;
838
- if (assistantId) {
839
- response = await openai.responses.create({ assistant_id: assistantId, ...requestBase });
840
- } else {
841
- response = await openai.responses.create({ model, ...requestBase });
842
- }
843
-
844
- let textOut = '';
845
- try { textOut = response.output_text || ''; } catch (_e) {}
846
- coloredLog("textOut: "+textOut);
847
- if (!textOut && response && Array.isArray(response.output)) {
848
- for (let i = 0; i < response.output.length; i++) {
849
- const item = response.output[i];
850
- if (item && item.type === 'message' && item.content && Array.isArray(item.content)) {
851
- const textPart = item.content.find(function (c) { return c.type === 'output_text' || c.type === 'text'; });
852
- if (textPart && (textPart.text || textPart.output_text)) {
853
- textOut = textPart.text || textPart.output_text;
854
- break;
855
- }
856
- }
857
- }
858
- }
859
-
860
- let patch = {};
861
- try {
862
- const jsonText = extractJsonText(textOut);
863
- const parsed = JSON.parse(jsonText || '{}');
864
- patch = parsed.patch || {};
865
- } catch (_e) {
866
- console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
867
- }
868
- coloredLog("patch: "+JSON.stringify(patch));
869
- const filteredPatch = {};
870
- fields.forEach(function (f) {
871
- if (Object.prototype.hasOwnProperty.call(patch, f)) {
872
- filteredPatch[f] = patch[f];
873
- }
874
- });
875
- // If we got no fields back on the first attempt, retry once before
876
- // giving up. Avoid infinite loops by marking a retry flag.
877
- if (!Object.keys(filteredPatch).length && !body._retry) {
878
- coloredLog('[autofill] empty patch, retrying once');
879
- const retryBody = Object.assign({}, body, { _retry: true });
880
- return await self.autofill(retryBody, req, res);
881
- }
882
-
883
- // Optional save
884
- let savedItem = null;
885
- if (body.save_history || body.save_itemtype) {
886
- const targetItemtype = body.save_itemtype || 'ai_response';
887
- if (JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[targetItemtype]) {
888
- const saveObj = {
889
- itemtype: targetItemtype,
890
- name: `[${schemaName}] autofill ${fields.join(', ')}`,
891
- object_id: object._id,
892
- target_schema: schemaName,
893
- fields,
894
- prompt: userPrompt,
895
- patch: filteredPatch,
896
- model,
897
- raw: { response }
898
- };
899
- await new Promise(function (resolve) {
900
- JOE.Storage.save(saveObj, targetItemtype, function (_err, saved) {
901
- savedItem = saved || null;
902
- resolve();
903
- });
904
- });
905
- }
906
- }
907
-
908
- return {
909
- success: true,
910
- patch: filteredPatch,
911
- model,
912
- usage: response && response.usage,
913
- saved: !!savedItem,
914
- saved_item: savedItem,
915
- elapsed_ms: Date.now() - startedAt
916
- };
917
- } catch (e) {
918
- return { success: false, error: e && e.message || 'Unknown error' };
919
- }
920
- };
921
-
922
- this.getResponse = function(data, req, res) {
923
- try {
924
- var prompt = data.prompt;
925
- if (!prompt) {
926
- return { error: 'No prompt provided' };
927
- }
928
-
929
- // Simulate a response from ChatGPT
930
- var response = `ChatGPT response to: ${prompt}`;
931
- res.jsonp({ response: response });
932
- return { use_callback: true };
933
- } catch (e) {
934
- return { errors: 'plugin error: ' + e, failedat: 'plugin' };
935
- }
936
- };
937
-
938
- this.html = function(data, req, res) {
939
- return JSON.stringify(self.default(data, req), '', '\t\r\n <br/>');
940
- };
941
- /* NEW AI RESPONSE API*/
942
-
943
- this.executeJOEAiPrompt = async function(data, req, res) {
944
- const referencedObjectIds = []; // Track all objects touched during helper function
945
- try {
946
- const promptId = data.ai_prompt;
947
- // Support both payload shapes: { ai_prompt, params:{...}, ... } and flat
948
- const params = (data && (data.params || data)) || {};
949
-
950
- if (!promptId) {
951
- return { error: "Missing prompt_id." };
952
- }
953
-
954
- const prompt = await $J.get(promptId); // Use $J.get for consistency
955
- if (!prompt) {
956
- return { error: "Prompt not found." };
957
- }
958
-
959
- let instructions = prompt.instructions || "";
960
- let finalInstructions=instructions;
961
- let finalInput='';
962
- // Pre-load all content_objects if content_items exist
963
- const contentObjects = {};
964
-
965
- if (prompt.content_items && Array.isArray(prompt.content_items)) {
966
- for (const content of prompt.content_items) {
967
- if (params[content.reference]) {
968
- const obj = $J.get(params[content.reference]);
969
- if (obj) {
970
- contentObjects[content.itemtype] = obj;
971
-
972
- // Pre-track referenced object
973
- if (obj._id && !referencedObjectIds.includes(obj._id)) {
974
- referencedObjectIds.push(obj._id);
975
- }
976
- }
977
- }
978
- }
979
- }
980
-
981
- // Execute any helper functions if present
982
- if (prompt.functions) {
983
- const modFunc = JOE.Utils.requireFromString(prompt.functions, prompt._id);
984
- const helperResult = await modFunc({
985
- instructions,
986
- params,
987
- ai_prompt: prompt,
988
- content_objects: contentObjects,
989
- trackObject: (obj) => {
990
- if (obj?._id && !referencedObjectIds.includes(obj._id)) {
991
- referencedObjectIds.push(obj._id);
992
- }
993
- }
994
- });
995
-
996
- if (typeof helperResult === 'object' && helperResult.error) {
997
- return { error: helperResult.error };
998
- }
999
-
1000
- // Assume the result is { instructions, input }
1001
- finalInstructions = helperResult.instructions || instructions;
1002
- finalInput = helperResult.input;
1003
- }
1004
-
1005
- const openai = newClient(); // however your OpenAI client is created
1006
-
1007
- const payload = {
1008
- model: prompt.ai_model || "gpt-4o",
1009
- instructions: finalInstructions||instructions, // string only
1010
- input:finalInput||'',
1011
- tools: prompt.tools || [{ "type": "web_search" }],
1012
- tool_choice: prompt.tool_choice || "auto",
1013
- temperature: prompt.temperature ? parseFloat(prompt.temperature) : 0.7,
1014
- //return_token_usage: true
1015
- //max_tokens: prompt.max_tokens ?? 1200
1016
- };
1017
- coloredLog(`${payload.model} and ${payload.temperature}`);
1018
- const mode = (prompt.attachments_mode || 'direct');
1019
- if (Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
1020
- if (mode === 'file_search'){
1021
- // Use file_search tool and attach vector store
1022
- try{
1023
- const ensured = await ensureVectorStoreForFiles(data.openai_file_ids);
1024
- payload.tools = payload.tools || [];
1025
- if(!payload.tools.find(t => t && t.type === 'file_search')){
1026
- payload.tools.push({ type:'file_search' });
1027
- }
1028
- payload.tool_resources = Object.assign({}, payload.tool_resources, {
1029
- file_search: { vector_store_ids: [ ensured.vectorStoreId ] }
1030
- });
1031
- // Keep input as text only (if any)
1032
- if (finalInput && String(finalInput).trim().length){
1033
- payload.input = finalInput;
1034
- }
1035
- }catch(e){
1036
- console.warn('[chatgpt] file_search setup failed; falling back to direct parts', e && e.message || e);
1037
- // Fall back to direct parts
1038
- const parts = [];
1039
- if (finalInput && String(finalInput).trim().length){
1040
- parts.push({ type:'input_text', text: String(finalInput) });
1041
- }
1042
- data.openai_file_ids.slice(0,10).forEach(function(id){
1043
- parts.push({ type:'input_file', file_id: id });
1044
- });
1045
- payload.input = [ { role:'user', content: parts } ];
1046
- }
1047
- } else {
1048
- // Direct context stuffing: input parts
1049
- const parts = [];
1050
- if (finalInput && String(finalInput).trim().length){
1051
- parts.push({ type:'input_text', text: String(finalInput) });
1052
- }
1053
- data.openai_file_ids.slice(0,10).forEach(function(id){
1054
- parts.push({ type:'input_file', file_id: id });
1055
- });
1056
- payload.input = [ { role:'user', content: parts } ];
1057
- }
1058
- }
1059
- const response = await safeResponsesCreate(openai, payload);
1060
-
1061
-
1062
- // const payload = createResponsePayload(prompt, params, instructions, data.user_prompt);
1063
-
1064
- // const response = await openai.chat.completions.create(payload);
1065
-
1066
- const saved = await saveAiResponseRefactor({
1067
- prompt,
1068
- ai_response_content: response.output_text || "",
1069
- user_prompt: payload.input,
1070
- params,
1071
- referenced_object_ids: referencedObjectIds,
1072
- response_id:response.id,
1073
- usage: response.usage || {},
1074
- user: req && req.User,
1075
- ai_assistant_id: data.ai_assistant_id
1076
- });
1077
- try{
1078
- if (saved && Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
1079
- saved.used_openai_file_ids = data.openai_file_ids.slice(0,10);
1080
- await new Promise(function(resolve){
1081
- JOE.Storage.save(saved,'ai_response',function(){ resolve(); },{ user: req && req.User, history:false });
1082
- });
1083
- }
1084
- }catch(_e){}
1085
-
1086
- return { success: true, ai_response_id: saved._id,response:response.output_text || "",usage:response.usage };
1087
- } catch (e) {
1088
- console.error('❌ executeJOEAiPrompt error:', e);
1089
- return { error: "Failed to execute AI prompt.",message: e.message };
1090
- }
1091
- };
1092
-
1093
- function createResponsePayload(prompt, params, instructions, user_prompt) {
1094
- return {
1095
- model: prompt.model || "gpt-4o",
1096
- messages: [
1097
- { role: "system", content: instructions },
1098
- { role: "user", content: user_prompt || "" }
1099
- ],
1100
- tools: prompt.tools || undefined,
1101
- tool_choice: prompt.tool_choice || "auto",
1102
- temperature: prompt.temperature ?? 0.7,
1103
- max_tokens: prompt.max_tokens ?? 1200
1104
- };
1105
- }
1106
- async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids,response_id,usage,user,ai_assistant_id}) {
1107
- var response_keys = [];
1108
- try {
1109
- response_keys = Object.keys(JSON.parse(ai_response_content));
1110
- }catch (e) {
1111
- console.error('❌ Error parsing AI response content for keys:', e);
1112
- }
1113
- // Best-effort parse into JSON for downstream agents (Thought pipeline, etc.)
1114
- let parsedResponse = null;
1115
- try {
1116
- const jt = extractJsonText(ai_response_content);
1117
- if (jt) {
1118
- parsedResponse = JSON.parse(jt);
1119
- }
1120
- } catch(_e) {
1121
- parsedResponse = null;
1122
- }
1123
- var creator_type = null;
1124
- var creator_id = null;
1125
- try{
1126
- if (ai_assistant_id){
1127
- creator_type = 'ai_assistant';
1128
- creator_id = ai_assistant_id;
1129
- } else if (user && user._id){
1130
- creator_type = 'user';
1131
- creator_id = user._id;
1132
- }
1133
- }catch(_e){}
1134
- const aiResponse = {
1135
- name: `${prompt.name}`,
1136
- itemtype: 'ai_response',
1137
- ai_prompt: prompt._id,
1138
- prompt_name: prompt.name,
1139
- prompt_method:prompt.prompt_method,
1140
- response: ai_response_content,
1141
- response_json: parsedResponse,
1142
- response_keys: response_keys,
1143
- response_id:response_id||'',
1144
- user_prompt: user_prompt,
1145
- params_used: params,
1146
- usage: usage || {},
1147
- tags: prompt.tags || [],
1148
- model_used: prompt.ai_model || "gpt-4o",
1149
- referenced_objects: referenced_object_ids, // new flexible array of referenced object ids
1150
- created: (new Date).toISOString(),
1151
- _id: cuid(),
1152
- creator_type: creator_type,
1153
- creator_id: creator_id
1154
- };
1155
-
1156
- await new Promise((resolve, reject) => {
1157
- JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
1158
- if (err) {
1159
- console.error('❌ Error saving AI response:', err);
1160
- reject(err);
1161
- } else {
1162
- console.log('✅ AI response saved successfully');
1163
- resolve(result);
1164
- }
1165
- });
1166
- });
1167
-
1168
- return aiResponse;
1169
- }
1170
-
1171
- // ---------- Widget chat endpoints (Responses API + optional assistants) ----------
1172
- function normalizeMessages(messages) {
1173
- if (!Array.isArray(messages)) { return []; }
1174
- return messages.map(function (m) {
1175
- return {
1176
- role: m.role || 'assistant',
1177
- content: m.content || '',
1178
- created_at: m.created_at || m.created || new Date().toISOString()
1179
- };
1180
- });
1181
- }
1182
-
1183
- /**
1184
- * widgetStart
1185
- *
1186
- * Purpose:
1187
- * Create and persist a new `ai_widget_conversation` record for the
1188
- * external `<joe-ai-widget>` chat component. This is a lightweight
1189
- * conversation record that stores model, assistant, system text and
1190
- * messages for the widget.
1191
- *
1192
- * Inputs (data):
1193
- * - model (optional) override model for the widget
1194
- * - ai_assistant_id (optional) JOE ai_assistant cuid
1195
- * - system (optional) explicit system text
1196
- * - source (optional) freeform source tag, defaults to "widget"
1197
- *
1198
- * OpenAI calls:
1199
- * - None. This endpoint only touches storage.
1200
- *
1201
- * Output:
1202
- * - { success, conversation_id, model, assistant_id }
1203
- * where assistant_id is the OpenAI assistant_id (if present).
1204
- */
1205
- this.widgetStart = async function (data, req, res) {
1206
- try {
1207
- var body = data || {};
1208
- // Default to a modern chat model when no assistant/model is provided.
1209
- // If an assistant is supplied, its ai_model will override this.
1210
- var model = body.model || "gpt-5.1";
1211
- var assistant = body.ai_assistant_id ? $J.get(body.ai_assistant_id) : null;
1212
- var system = body.system || (assistant && assistant.instructions) || "";
1213
- // Prefer explicit user fields coming from the client (ai-widget-test page
1214
- // passes _joe.User fields). Widget endpoints no longer infer from req.User
1215
- // to keep a single, explicit source of truth.
1216
- var user = null;
1217
- if (body.user_id || body.user_name || body.user_color) {
1218
- user = {
1219
- _id: body.user_id,
1220
- name: body.user_name,
1221
- fullname: body.user_name,
1222
- color: body.user_color
1223
- };
1224
- }
1225
- var user_color = (body.user_color) || (user && user.color) || null;
1226
-
1227
- var convo = {
1228
- _id: (typeof cuid === 'function') ? cuid() : undefined,
1229
- itemtype: "ai_widget_conversation",
1230
- model: (assistant && assistant.ai_model) || model,
1231
- assistant: assistant && assistant._id,
1232
- assistant_id: assistant && assistant.assistant_id,
1233
- assistant_color: assistant && assistant.assistant_color,
1234
- user: user && user._id,
1235
- user_name: user && (user.fullname || user.name),
1236
- user_color: user_color,
1237
- system: system,
1238
- messages: [],
1239
- source: body.source || "widget",
1240
- created: new Date().toISOString(),
1241
- joeUpdated: new Date().toISOString()
1242
- };
1243
-
1244
- const saved = await new Promise(function (resolve, reject) {
1245
- // Widget conversations are lightweight and do not need full history diffs.
1246
- JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
1247
- if (err) return reject(err);
1248
- resolve(result);
1249
- }, { history: false });
1250
- });
1251
-
1252
- return {
1253
- success: true,
1254
- conversation_id: saved._id,
1255
- model: saved.model,
1256
- assistant_id: saved.assistant_id || null,
1257
- assistant_color: saved.assistant_color || null,
1258
- user_color: saved.user_color || user_color || null
1259
- };
1260
- } catch (e) {
1261
- console.error("[chatgpt] widgetStart error:", e);
1262
- return { success: false, error: e && e.message || "Unknown error" };
1263
- }
1264
- };
1265
-
1266
- /**
1267
- * widgetHistory
1268
- *
1269
- * Purpose:
1270
- * Load an existing `ai_widget_conversation` and normalize its
1271
- * messages for use by `<joe-ai-widget>` on page load or refresh.
1272
- *
1273
- * Inputs (data):
1274
- * - conversation_id or _id: the widget conversation cuid
1275
- *
1276
- * OpenAI calls:
1277
- * - None. Purely storage + normalization.
1278
- *
1279
- * Output:
1280
- * - { success, conversation_id, model, assistant_id, messages }
1281
- */
1282
- this.widgetHistory = async function (data, req, res) {
1283
- try {
1284
- var conversation_id = data.conversation_id || data._id;
1285
- if (!conversation_id) {
1286
- return { success: false, error: "Missing conversation_id" };
1287
- }
1288
- const convo = await new Promise(function (resolve, reject) {
1289
- JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
1290
- if (err) return reject(err);
1291
- resolve(results && results[0]);
1292
- });
1293
- });
1294
- if (!convo) {
1295
- return { success: false, error: "Conversation not found" };
1296
- }
1297
-
1298
- convo.messages = normalizeMessages(convo.messages);
1299
- return {
1300
- success: true,
1301
- conversation_id: convo._id,
1302
- model: convo.model,
1303
- assistant_id: convo.assistant_id || null,
1304
- assistant_color: convo.assistant_color || null,
1305
- user_color: convo.user_color || null,
1306
- messages: convo.messages
1307
- };
1308
- } catch (e) {
1309
- console.error("[chatgpt] widgetHistory error:", e);
1310
- return { success: false, error: e && e.message || "Unknown error" };
1311
- }
1312
- };
1313
-
1314
- /**
1315
- * widgetMessage
1316
- *
1317
- * Purpose:
1318
- * Handle a single user turn for `<joe-ai-widget>`:
1319
- * - Append the user message to the stored conversation.
1320
- * - Call OpenAI Responses (optionally with tools from the selected
1321
- * `ai_assistant`, via runWithTools + MCP).
1322
- * - Append the assistant reply, persist the conversation, and return
1323
- * the full message history plus the latest assistant message.
1324
- *
1325
- * Inputs (data):
1326
- * - conversation_id or _id: cuid of the widget conversation
1327
- * - content: user text
1328
- * - role: user role, defaults to "user"
1329
- * - assistant_id: optional OpenAI assistant_id (used only to
1330
- * locate the JOE ai_assistant config)
1331
- * - model: optional model override
1332
- *
1333
- * OpenAI calls:
1334
- * - responses.create (once if no tools; twice when tools are present):
1335
- * * First call may include tools (assistant.tools) and `tool_choice:"auto"`.
1336
- * * Any tool calls are executed via MCP and injected as `tool` messages.
1337
- * * Second call is plain Responses with updated messages.
1338
- *
1339
- * Output:
1340
- * - { success, conversation_id, model, assistant_id, messages,
1341
- * last_message, usage }
1342
- */
1343
- this.widgetMessage = async function (data, req, res) {
1344
- try {
1345
- var body = data || {};
1346
- var conversation_id = body.conversation_id || body._id;
1347
- var content = body.content;
1348
- var role = body.role || "user";
1349
-
1350
- if (!conversation_id || !content) {
1351
- return { success: false, error: "Missing conversation_id or content" };
1352
- }
1353
-
1354
- const convo = await new Promise(function (resolve, reject) {
1355
- JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
1356
- if (err) return reject(err);
1357
- resolve(results && results[0]);
1358
- });
1359
- });
1360
- if (!convo) {
1361
- return { success: false, error: "Conversation not found" };
1362
- }
1363
-
1364
- convo.messages = normalizeMessages(convo.messages);
1365
- const nowIso = new Date().toISOString();
1366
-
1367
- // Append user message
1368
- const userMsg = { role: role, content: content, created_at: nowIso };
1369
- convo.messages.push(userMsg);
1370
-
1371
- // Backfill user metadata (id/name/color) on older conversations that
1372
- // were created before we started storing these fields. Prefer explicit
1373
- // body fields only; we no longer infer from req.User so that widget
1374
- // calls always have a single, explicit user source.
1375
- var u = null;
1376
- if (body.user_id || body.user_name || body.user_color) {
1377
- u = {
1378
- _id: body.user_id,
1379
- name: body.user_name,
1380
- fullname: body.user_name,
1381
- color: body.user_color
1382
- };
1383
- }
1384
- if (u) {
1385
- if (!convo.user && u._id) {
1386
- convo.user = u._id;
1387
- }
1388
- if (!convo.user_name && (u.fullname || u.name)) {
1389
- convo.user_name = u.fullname || u.name;
1390
- }
1391
- if (!convo.user_color && u.color) {
1392
- convo.user_color = u.color;
1393
- }
1394
- }
1395
-
1396
- const assistantId = body.assistant_id || convo.assistant_id || null;
1397
- // NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
1398
- // We do NOT pass assistant_id to the Responses API (it is not supported in the
1399
- // version we are using); instead we look up the JOE ai_assistant by assistant_id
1400
- // and inject its configuration (model, instructions, tools) into the request.
1401
- var assistantObj = null;
1402
- if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
1403
- assistantObj = JOE.Data.ai_assistant.find(function (a) {
1404
- return a && a.assistant_id === assistantId;
1405
- }) || null;
1406
- }
1407
- const openai = newClient();
1408
- const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";
1409
-
1410
- // Prefer explicit system text on the conversation, then assistant instructions.
1411
- const systemText = (convo.system && String(convo.system)) ||
1412
- (assistantObj && assistantObj.instructions) ||
1413
- "";
1414
- const messagesForModel = convo.messages.map(function (m) {
1415
- return { role: m.role, content: m.content };
1416
- });
1417
-
1418
- // Use runWithTools so that, when an assistant has tools configured,
1419
- // we let the model call those tools via MCP before generating a
1420
- // final response.
1421
- const runResult = await runWithTools({
1422
- openai: openai,
1423
- model: model,
1424
- systemText: systemText,
1425
- messages: messagesForModel,
1426
- assistant: assistantObj,
1427
- req: req
1428
- });
1429
-
1430
- // If tools were called this turn, inject a small meta message so the
1431
- // widget clearly shows which functions ran before the assistant reply.
1432
- if (runResult.toolCalls && runResult.toolCalls.length) {
1433
- const names = runResult.toolCalls.map(function (tc) { return tc && tc.name; })
1434
- .filter(Boolean)
1435
- .join(', ');
1436
- convo.messages.push({
1437
- role: "assistant",
1438
- meta: "tools_used",
1439
- content: "[Tools used this turn: " + names + "]",
1440
- created_at: nowIso
1441
- });
1442
- }
1443
-
1444
- const assistantText = runResult.finalText || "";
1445
- const assistantMsg = {
1446
- role: "assistant",
1447
- content: assistantText,
1448
- created_at: new Date().toISOString()
1449
- };
1450
- convo.messages.push(assistantMsg);
1451
- convo.last_message_at = assistantMsg.created_at;
1452
- convo.joeUpdated = assistantMsg.created_at;
1453
-
1454
- await new Promise(function (resolve, reject) {
1455
- // Skip history for widget conversations to avoid heavy diffs / craydent.equals issues.
1456
- JOE.Storage.save(convo, "ai_widget_conversation", function (err, saved) {
1457
- if (err) return reject(err);
1458
- resolve(saved);
1459
- }, { history: false });
1460
- });
1461
-
1462
- return {
1463
- success: true,
1464
- conversation_id: convo._id,
1465
- model: model,
1466
- assistant_id: assistantId,
1467
- assistant_color: (assistantObj && assistantObj.assistant_color) || convo.assistant_color || null,
1468
- user_color: convo.user_color || ((u && u.color) || null),
1469
- messages: convo.messages,
1470
- last_message: assistantMsg,
1471
- // Usage comes from the underlying Responses call inside runWithTools.
1472
- usage: (runResult.response && runResult.response.usage) || {}
1473
- };
1474
- } catch (e) {
1475
- console.error("[chatgpt] widgetMessage error:", e);
1476
- return { success: false, error: e && e.message || "Unknown error" };
1477
- }
1478
- };
1479
-
1480
- // Mark async plugin methods so Server.pluginHandling will await them.
1481
- this.async = {
1482
- executeJOEAiPrompt: this.executeJOEAiPrompt,
1483
- testPrompt: this.testPrompt,
1484
- sendInitialConsultTranscript: this.sendInitialConsultTranscript,
1485
- widgetStart: this.widgetStart,
1486
- widgetHistory: this.widgetHistory,
1487
- widgetMessage: this.widgetMessage,
1488
- autofill: this.autofill,
1489
- filesRetryFromUrl: this.filesRetryFromUrl
1490
- };
1491
- this.protected = [,'testPrompt'];
1492
- return self;
1493
- }
1494
-
1495
- module.exports = new ChatGPT();
1
+ const OpenAI = require("openai");
2
+ const { google } = require('googleapis');
3
+ const path = require('path');
4
+ const os = require('os');
5
+ const fs = require('fs');
6
+ const MCP = require("../modules/MCP.js");
7
+ // const { name } = require("json-object-editor/server/webconfig");
8
+
9
+ function ChatGPT() {
10
+ // const fetch = (await import('node-fetch')).default;
11
+ //const openai = new OpenAI();
12
+ // Load the service account key JSON file
13
+ const serviceAccountKeyFile = path.join(__dirname, '../local-joe-239900-e9e3b447c70e.json');
14
+ const google_auth = new google.auth.GoogleAuth({
15
+ keyFile: serviceAccountKeyFile,
16
+ scopes: ['https://www.googleapis.com/auth/documents.readonly'],
17
+ });
18
+
19
+ var self = this;
20
+ this.async ={};
21
+ function coloredLog(message){
22
+ console.log(JOE.Utils.color('[chatgpt]', 'plugin', false), message);
23
+ }
24
+ //xx -setup and send a test prompt to chatgpt
25
+ //xx get the api key from joe settings
26
+
27
+ //get a prompt from id
28
+ //send the prompt to chatgpt
29
+
30
+ //++get the cotnent of a file
31
+ //++send the content of a file to chatgpt
32
+
33
+ //++ structure data
34
+ //++ save the response to an ai_repsonse
35
+ //create an ai_response
36
+ //store the content
37
+ //attach to the request
38
+ //store ids sent with the request
39
+ this.default = function(data, req, res) {
40
+ try {
41
+ var payload = {
42
+ params: req.params,
43
+ data: data
44
+ };
45
+ } catch (e) {
46
+ return { errors: 'plugin error: ' + e, failedat: 'plugin' };
47
+ }
48
+ return payload;
49
+ };
50
+ function getAPIKey() {
51
+ const setting = JOE.Utils.Settings('OPENAI_API_KEY');
52
+ if (!setting) throw new Error("Missing OPENAI_API_KEY setting");
53
+ return setting;
54
+ }
55
+ function getSchemaDef(name) {
56
+ if (!name) return { full: null, summary: null };
57
+ const full = JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[name];
58
+ const summary = JOE.Schemas && JOE.Schemas.summary && JOE.Schemas.summary[name];
59
+ return { full, summary };
60
+ }
61
+ /**
62
+ * callMCPTool
63
+ *
64
+ * Small, well‑scoped helper to invoke a JOE MCP tool directly in‑process,
65
+ * without going over HTTP or worrying about POST size limits.
66
+ *
67
+ * Usage:
68
+ * const result = await callMCPTool('listSchemas', {}, { req });
69
+ *
70
+ * Notes:
71
+ * - `toolName` must exist on MCP.tools.
72
+ * - `params` should be a plain JSON-serializable object.
73
+ * - `ctx` is optional and can pass `{ req }` or other context that MCP
74
+ * tools might want (for auth, user, etc.).
75
+ */
76
+ async function callMCPTool(toolName, params = {}, ctx = {}) {
77
+ if (!MCP || !MCP.tools) {
78
+ throw new Error("MCP module not initialized; cannot call MCP tool");
79
+ }
80
+ if (!toolName || typeof toolName !== 'string') {
81
+ throw new Error("Missing or invalid MCP tool name");
82
+ }
83
+ const fn = MCP.tools[toolName];
84
+ if (typeof fn !== 'function') {
85
+ throw new Error(`MCP tool "${toolName}" not found`);
86
+ }
87
+ try {
88
+ // All MCP tools accept (params, ctx) and return a JSON-serializable result.
89
+ // The Responses / tools API often returns arguments as a JSON string, so
90
+ // normalize that here before invoking the tool.
91
+ let toolParams = params;
92
+ if (typeof toolParams === 'string') {
93
+ try {
94
+ toolParams = JSON.parse(toolParams);
95
+ } catch (parseErr) {
96
+ console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, toolParams);
97
+ // Fall back to passing the raw string so tools that expect it still work.
98
+ }
99
+ }
100
+ const result = await fn(toolParams || {}, ctx || {});
101
+ return result;
102
+ } catch (e) {
103
+ // Surface a clean error upstream but keep details in logs.
104
+ console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
105
+ throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
106
+ }
107
+ }
108
+
109
+ /**
110
+ * extractToolCalls
111
+ *
112
+ * Best-effort parser for tool calls from a Responses API result.
113
+ * The Responses output shape may evolve; this function looks for
114
+ * any "tool_call" typed content in response.output[*].content[*]
115
+ * and normalizes it into `{ name, arguments }` objects.
116
+ */
117
+ function extractToolCalls(response) {
118
+ var calls = [];
119
+ if (!response || !Array.isArray(response.output)) { return calls; }
120
+
121
+ response.output.forEach(function (item) {
122
+ if (!item) { return; }
123
+ // v1-style: item.type === 'tool_call'
124
+ if (item.type === 'function_call') {
125
+ calls.push({
126
+ name: item.name || item.function_name,
127
+ arguments: item.arguments || item.function_arguments || {}
128
+ });
129
+ }
130
+ // message-style: item.content is an array of parts
131
+ if (Array.isArray(item.content)) {
132
+ item.content.forEach(function (part) {
133
+ if (!part) { return; }
134
+ if (part.type === 'function_call') {
135
+ calls.push({
136
+ name: part.name || part.tool_name,
137
+ arguments: part.arguments || part.args || {}
138
+ });
139
+ }
140
+ });
141
+ }
142
+ });
143
+
144
+ return calls;
145
+ }
146
+
147
+ // Detect "request too large / token limit" style errors from the Responses API.
148
+ function isTokenLimitError(err) {
149
+ if (!err || typeof err !== 'object') return false;
150
+ if (err.status !== 429 && err.status !== 400) return false;
151
+ const msg = (err.error && err.error.message) || err.message || '';
152
+ if (!msg) return false;
153
+ const lower = String(msg).toLowerCase();
154
+ // Cover common phrasing from OpenAI for context/TPM limits.
155
+ return (
156
+ lower.includes('request too large') ||
157
+ lower.includes('too many tokens') ||
158
+ lower.includes('max tokens') ||
159
+ lower.includes('maximum context length') ||
160
+ lower.includes('tokens per min')
161
+ );
162
+ }
163
+
164
+ // Create a compact representation of a JOE object for use in slim payloads.
165
+ function slimJOEObject(item) {
166
+ if (!item || typeof item !== 'object') return item;
167
+ const name = item.name || item.title || item.label || item.email || item.slug || item._id || '';
168
+ const info = item.info || item.description || item.summary || '';
169
+ return {
170
+ _id: item._id,
171
+ itemtype: item.itemtype,
172
+ name: name,
173
+ info: info
174
+ };
175
+ }
176
+
177
+ // Given an `understandObject` result, produce a slimmed version:
178
+ // - keep `object` as-is
179
+ // - keep `flattened` for the main object (depth-limited) if present
180
+ // - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
181
+ // - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
182
+ function slimUnderstandObjectResult(result) {
183
+ if (!result || typeof result !== 'object') return result;
184
+ const out = {
185
+ _id: result._id,
186
+ itemtype: result.itemtype,
187
+ object: result.object,
188
+ // retain main flattened view if available; this is typically much smaller
189
+ flattened: result.flattened || null,
190
+ schemas: result.schemas || {},
191
+ tags: result.tags || {},
192
+ statuses: result.statuses || {},
193
+ slim: true
194
+ };
195
+ if (Array.isArray(result.related)) {
196
+ out.related = result.related.map(function (rel) {
197
+ if (!rel) return rel;
198
+ const base = rel.object || {};
199
+ const slim = slimJOEObject(base);
200
+ return {
201
+ field: rel.field,
202
+ _id: slim && slim._id || rel._id,
203
+ itemtype: slim && slim.itemtype || rel.itemtype,
204
+ object: slim
205
+ };
206
+ });
207
+ } else {
208
+ out.related = [];
209
+ }
210
+ return out;
211
+ }
212
+
213
/**
 * Scan a messages array for system messages whose content is a JSON payload
 * of the form {"tool":"understandObject","result":{...}} and swap the result
 * for its slimmed form to cut token usage.
 * Returns a new array when at least one message was rewritten; otherwise the
 * original array is returned unchanged (identity preserved).
 */
function shrinkUnderstandObjectMessagesForTokens(messages) {
  if (!Array.isArray(messages)) {
    return messages;
  }
  let didShrink = false;
  const rewritten = messages.map((msg) => {
    if (!msg || msg.role !== 'system') {
      return msg;
    }
    if (typeof msg.content !== 'string') {
      return msg;
    }
    try {
      const payload = JSON.parse(msg.content);
      if (!payload || payload.tool !== 'understandObject' || !payload.result) {
        return msg;
      }
      didShrink = true;
      return {
        ...msg,
        content: JSON.stringify({
          tool: 'understandObject',
          result: slimUnderstandObjectResult(payload.result)
        })
      };
    } catch (_e) {
      // Non-JSON system messages pass through untouched.
      return msg;
    }
  });
  return didShrink ? rewritten : messages;
}
240
+
241
/**
 * runWithTools
 *
 * Single orchestration function for calling the OpenAI Responses API
 * with optional tools (sourced from a JOE `ai_assistant`), handling
 * tool calls via MCP, and issuing a follow-up model call with the
 * tool results injected.
 *
 * Inputs (opts):
 * - openai: OpenAI client instance
 * - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
 * - systemText: string of system / instructions text
 * - messages: array of { role, content } for the conversation so far
 * - assistant: JOE `ai_assistant` object (may contain `tools`)
 * - req: Express request (passed into MCP tools as context)
 * - attachments_mode: optional; when set (with openai_file_ids), file
 *   attachments are wired into every Responses call via
 *   attachFilesToResponsesPayload (best-effort; failures are logged)
 * - openai_file_ids: optional array of OpenAI file ids to attach
 *
 * Returns:
 * - { response, finalText, messages, toolCalls }
 *   where `finalText` is the assistant-facing text (from output_text)
 *   and `messages` is the possibly-extended message list including
 *   any synthetic `tool` messages.
 */
async function runWithTools(opts) {
  const openai = opts.openai;
  const model = opts.model;
  const systemText = opts.systemText || "";
  // Copy the caller's messages so tool-result pushes below don't mutate it.
  const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
  const assistant = opts.assistant || null;
  const req = opts.req;
  const attachmentsMode = opts.attachments_mode || null;
  const openaiFileIds = opts.openai_file_ids || null;

  // Normalize tools: in many schemas tools may be stored as a JSON string;
  // here we accept either an array or a JSON-stringified array.
  let tools = null;
  if (assistant && assistant.tools) {
    if (Array.isArray(assistant.tools)) {
      tools = assistant.tools;
    } else if (typeof assistant.tools === 'string') {
      try {
        const parsed = JSON.parse(assistant.tools);
        if (Array.isArray(parsed)) {
          tools = parsed;
        }
      } catch (e) {
        console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
      }
    }
  }
  // Normalize tool definitions for the Responses API. The assistant UI
  // uses the Assistants-style shape ({ type:'function', function:{...} }),
  // but Responses expects the name/description/parameters at the top level:
  //   { type:'function', name:'x', description:'...', parameters:{...} }
  if (Array.isArray(tools)) {
    tools = tools.map(function (t) {
      if (t && t.type === 'function' && t.function && !t.name) {
        const fn = t.function || {};
        return {
          type: 'function',
          name: fn.name,
          description: fn.description,
          parameters: fn.parameters || {}
        };
      }
      return t;
    });
  }

  // No tools configured – do a simple single Responses call.
  if (!tools) {
    const resp = await openai.responses.create({
      model: model,
      instructions: systemText,
      input: messages
    });
    return {
      response: resp,
      finalText: resp.output_text || "",
      messages: messages,
      toolCalls: []
    };
  }

  // Step 1: call the model with tools enabled.
  let firstPayload = {
    model: model,
    instructions: systemText,
    input: messages,
    tools: tools,
    tool_choice: "auto"
  };
  if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
    try{
      firstPayload = await attachFilesToResponsesPayload(openai, firstPayload, {
        attachments_mode: attachmentsMode,
        openai_file_ids: openaiFileIds
      });
    }catch(e){
      // Attachments are best-effort; the call proceeds without them.
      console.warn('[chatgpt] runWithTools attachments failed; continuing without attachments', e && e.message || e);
    }
  }
  const first = await openai.responses.create(firstPayload);

  const toolCalls = extractToolCalls(first);

  // If the model didn't decide to use tools, just return the first answer.
  if (!toolCalls.length) {
    return {
      response: first,
      finalText: first.output_text || "",
      messages: messages,
      toolCalls: []
    };
  }

  // Step 2: execute each tool call via MCP and append tool results.
  for (let i = 0; i < toolCalls.length; i++) {
    const tc = toolCalls[i];
    try {
      const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
      messages.push({
        // Responses API does not support a "tool" role in messages.
        // We inject tool outputs as a synthetic system message so
        // the model can see the results without affecting the
        // user/assistant turn structure.
        role: "system",
        content: JSON.stringify({ tool: tc.name, result: result })
      });
    } catch (e) {
      // A failing tool becomes an error message to the model, not a crash.
      console.error("[chatgpt] MCP tool error in runWithTools:", e);
      messages.push({
        role: "system",
        content: JSON.stringify({
          tool: tc.name,
          error: e && e.message || "Tool execution failed"
        })
      });
    }
  }

  // Step 3: ask the model again with tool outputs included.
  let finalMessages = messages;
  let second;
  try {
    let secondPayload = {
      model: model,
      instructions: systemText,
      input: finalMessages
    };
    if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
      try{
        secondPayload = await attachFilesToResponsesPayload(openai, secondPayload, {
          attachments_mode: attachmentsMode,
          openai_file_ids: openaiFileIds
        });
      }catch(e){
        console.warn('[chatgpt] runWithTools second-call attachments failed; continuing without attachments', e && e.message || e);
      }
    }
    second = await openai.responses.create(secondPayload);
  } catch (e) {
    if (isTokenLimitError(e)) {
      console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
      const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
      // If nothing was shrunk, just rethrow the original error.
      if (shrunk === finalMessages) {
        throw e;
      }
      finalMessages = shrunk;
      // Retry once with the smaller payload; let any error bubble up.
      let retryPayload = {
        model: model,
        instructions: systemText,
        input: finalMessages
      };
      if (attachmentsMode && Array.isArray(openaiFileIds) && openaiFileIds.length){
        try{
          retryPayload = await attachFilesToResponsesPayload(openai, retryPayload, {
            attachments_mode: attachmentsMode,
            openai_file_ids: openaiFileIds
          });
        }catch(e2){
          console.warn('[chatgpt] runWithTools retry attachments failed; continuing without attachments', e2 && e2.message || e2);
        }
      }
      second = await openai.responses.create(retryPayload);
    } else {
      throw e;
    }
  }

  return {
    response: second,
    finalText: second.output_text || "",
    // Note: finalMessages may be the shrunk variant after a token-limit retry.
    messages: finalMessages,
    toolCalls: toolCalls
  };
}
439
+
440
+ // function newClient(){
441
+ // var key = getAPIKey();
442
+ // var c = new OpenAI({
443
+ // apiKey: key, // This is the default and can be omitted
444
+ // });
445
+ // if(!c || !c.apiKey){
446
+ // return { errors: 'No API key provided' };
447
+ // }
448
+ // return c;
449
+ // }
450
// Construct a fresh OpenAI client using the API key from JOE settings.
function newClient() {
  const apiKey = getAPIKey();
  return new OpenAI({ apiKey });
}
453
+
454
// Safely call the Responses API with optional temperature/top_p.
// Some models reject these sampling parameters ("unsupported parameter" /
// "unknown parameter" API errors); in that case strip them from a copy of
// the payload and retry exactly once.
// Fix: a failing retry now surfaces the retry's own error instead of
// rethrowing the stale original error (the old inner catch masked it).
async function safeResponsesCreate(openai, payload){
  try {
    return await openai.responses.create(payload);
  } catch (e) {
    const msg = String(
      (e && e.error && e.error.message) || (e && e.message) || ''
    ).toLowerCase();
    const isParamRejection =
      msg.includes('unsupported parameter') || msg.includes('unknown parameter');
    const mentionsSampling =
      msg.includes('temperature') || msg.includes('top_p');
    if (isParamRejection && mentionsSampling) {
      // Copy so the caller's payload object is left untouched.
      const stripped = Object.assign({}, payload);
      delete stripped.temperature;
      delete stripped.top_p;
      console.warn('[chatgpt] Retrying without temperature/top_p due to model rejection');
      return await openai.responses.create(stripped);
    }
    throw e;
  }
}
477
+
478
/**
 * Create a vector store and index the given OpenAI file ids into it.
 * The store is ephemeral (one per run; reuse/persistence could be added
 * later). At most the first 10 ids are linked; per-file indexing failures
 * are logged and skipped. Polls best-effort (up to ~8s) for the files to
 * finish processing before returning.
 * @returns {Promise<{vectorStoreId: string}>}
 */
async function ensureVectorStoreForFiles(fileIds = []){
  const client = newClient();
  const store = await client.vectorStores.create({ name: `JOE Prompt Run ${Date.now()}` });
  const idsToLink = (fileIds || []).slice(0, 10);
  for (const fileId of idsToLink) {
    try {
      await client.vectorStores.files.create(store.id, { file_id: fileId });
    } catch (err) {
      console.warn('[chatgpt] vectorStores.files.create failed for', fileId, err && err.message || err);
    }
  }
  // Best-effort wait for indexing to complete (non-fatal on error/timeout).
  const deadline = Date.now() + 8000;
  try {
    while (Date.now() < deadline) {
      const listing = await client.vectorStores.files.list(store.id, { limit: 100 });
      const files = (listing && listing.data) || [];
      const stillProcessing = files.some((f) => f.status && f.status !== 'completed');
      if (!stillProcessing) {
        break;
      }
      await new Promise((resolve) => setTimeout(resolve, 500));
    }
  } catch (_e) { /* polling is non-fatal */ }
  return { vectorStoreId: store.id };
}
506
+
507
// ---------------- OpenAI Files helpers ----------------
/**
 * attachFilesToResponsesPayload
 *
 * Shared helper that wires OpenAI `responses.create` payloads with file
 * attachments, used by both MCP and non-MCP paths. File-only: it never
 * touches `instructions` or other payload fields. Mutates and returns the
 * given payload.
 *
 * Modes (opts.attachments_mode):
 * - 'file_search': ensures a temporary vector store via
 *   ensureVectorStoreForFiles, adds a `file_search` tool (if absent) and
 *   points tool_resources.file_search.vector_store_ids at it; `input` is
 *   left as text/messages.
 * - 'direct' (default): converts the existing `input` (string, or messages
 *   array JSON-stringified) into an `input_text` part and appends up to 10
 *   `{ type:'input_file', file_id }` parts, then sets
 *   payload.input = [{ role:'user', content: parts }].
 */
async function attachFilesToResponsesPayload(openai, payload, opts){
  const attachMode = (opts && opts.attachments_mode) || 'direct';
  const fileIds = (opts && opts.openai_file_ids) || [];
  if (!Array.isArray(fileIds) || !fileIds.length) {
    return payload;
  }

  if (attachMode === 'file_search') {
    const { vectorStoreId } = await ensureVectorStoreForFiles(fileIds);
    payload.tools = payload.tools || [];
    const alreadyHasTool = payload.tools.some((t) => t && t.type === 'file_search');
    if (!alreadyHasTool) {
      payload.tools.push({ type:'file_search' });
    }
    payload.tool_resources = Object.assign({}, payload.tool_resources, {
      file_search: { vector_store_ids: [vectorStoreId] }
    });
    return payload;
  }

  // Default: direct context stuffing using input_text + input_file parts.
  const contentParts = [];
  if (typeof payload.input === 'string' && payload.input.trim().length) {
    contentParts.push({ type:'input_text', text: String(payload.input) });
  } else if (Array.isArray(payload.input)) {
    // If the caller already provided messages as input, preserve them by
    // flattening into input_text where possible.
    try {
      const serialized = JSON.stringify(payload.input);
      if (serialized && serialized.length) {
        contentParts.push({ type:'input_text', text: serialized });
      }
    } catch (_e) {}
  }
  for (const fid of fileIds.slice(0, 10)) {
    if (fid) {
      contentParts.push({ type:'input_file', file_id: fid });
    }
  }
  payload.input = [{ role:'user', content: contentParts }];
  return payload;
}
568
/**
 * Upload a raw buffer to OpenAI Files by staging it as a temp file first.
 * @param {Buffer} buffer - bytes to upload
 * @param {string} [filename] - temp-file name (defaults to upload_<timestamp>)
 * @param {string} [contentType] - currently unused; kept for interface parity
 * @param {string} [purpose] - OpenAI file purpose (defaults to 'assistants')
 * @returns {Promise<{id: string, purpose: string}>}
 */
async function uploadFileFromBuffer(buffer, filename, contentType, purpose) {
  const client = newClient();
  const filePurpose = purpose || 'assistants';
  const stagedName = filename || ('upload_' + Date.now());
  const stagedPath = path.join(os.tmpdir(), stagedName);
  await fs.promises.writeFile(stagedPath, buffer);
  try {
    // openai.files.create accepts a readable stream
    const created = await client.files.create({
      purpose: filePurpose,
      file: fs.createReadStream(stagedPath)
    });
    return { id: created.id, purpose: filePurpose };
  } finally {
    // best-effort temp-file cleanup
    fs.promises.unlink(stagedPath).catch(() => {});
  }
}
588
+
589
/**
 * In-process helper other plugins can call to upload a buffer to OpenAI
 * Files. Throws when no non-empty buffer is supplied.
 */
this.filesUploadFromBufferHelper = async function ({ buffer, filename, contentType, purpose }) {
  const hasBytes = !!(buffer && buffer.length);
  if (!hasBytes) {
    throw new Error('Missing buffer');
  }
  return uploadFileFromBuffer(buffer, filename, contentType, purpose || 'assistants');
};
596
+
597
/**
 * Public endpoint: re-download a file from a URL (e.g. an S3 object URL)
 * and upload it to OpenAI Files with purpose 'assistants'.
 * Returns { success:true, openai_file_id, openai_purpose } on success,
 * or { success:false, error } on any failure.
 */
this.filesRetryFromUrl = async function (data, req, res) {
  try {
    const { default: got } = await import('got');
    const url = data && (data.url || data.location);
    const filename =
      (data && data.filename) || (url && url.split('/').pop()) || ('upload_' + Date.now());
    const contentType = (data && data.contentType) || undefined;
    if (!url) {
      return { success: false, error: 'Missing url' };
    }
    const download = await got(url, { responseType: 'buffer' });
    const uploaded = await uploadFileFromBuffer(download.body, filename, contentType, 'assistants');
    return { success: true, openai_file_id: uploaded.id, openai_purpose: uploaded.purpose };
  } catch (e) {
    return { success: false, error: e && e.message || 'Retry upload failed' };
  }
};
616
/**
 * Test endpoint: send a prompt (data.prompt, or a default JOE story prompt)
 * to chat.completions, persist the result as an ai_response (best-effort),
 * and return { payload, chatCompletion, content }.
 * Fix: the request now sends the caller-supplied prompt when present, so the
 * stored record's user_prompt matches what was actually sent (previously the
 * hard-coded default prompt was always sent even when data.prompt was given).
 */
this.testPrompt = async function (data, req, res) {
  try {
    var payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  const client = newClient();
  if (client.errors) {
    return { errors: client.errors };
  }
  // Prefer the caller's prompt; fall back to the original default.
  const userPrompt = (data && data.prompt) || 'Tell me a story about JOE: the json object editor in under 256 chars.';
  try {
    const chatCompletion = await client.chat.completions.create({
      messages: [{ role: 'user', content: userPrompt }],
      model: 'gpt-4o',
    });
    coloredLog(chatCompletion);
    const text = chatCompletion.choices && chatCompletion.choices[0] && chatCompletion.choices[0].message && chatCompletion.choices[0].message.content || '';
    // Optionally persist as ai_response with parsed JSON when applicable
    const parsed = (function(){
      try {
        const jt = extractJsonText(text);
        return jt ? JSON.parse(jt) : null;
      } catch(_e){ return null; }
    })();
    try {
      var creator_type = null;
      var creator_id = null;
      try{
        var u = req && req.User;
        if (u && u._id){
          creator_type = 'user';
          creator_id = u._id;
        }
      }catch(_e){}
      const aiResponse = {
        itemtype: 'ai_response',
        name: 'Test Prompt ChatGPT',
        response_type: 'testPrompt',
        response: text,
        response_json: parsed,
        response_id: chatCompletion.id || '',
        // Record exactly the prompt that was sent to the model.
        user_prompt: userPrompt,
        model_used: 'gpt-4o',
        created: (new Date()).toISOString(),
        creator_type: creator_type,
        creator_id: creator_id
      };
      JOE.Storage.save(aiResponse, 'ai_response', function(){}, { history: false, user: (req && req.User) || { name:'system' } });
    } catch(_e){ /* best-effort only */ }
    return {payload,chatCompletion,content:text};
  } catch (error) {
    if (error.status === 429) {
      return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
    } else {
      return { errors: 'plugin error: ' + error.message, failedat: 'plugin' };
    }
  }
}
677
+
678
/**
 * Fetch a business's initial-consult transcript (a Google Doc referenced by
 * business.initial_transcript_url), combine it with the ai_prompt's template,
 * send it to ChatGPT, persist the reply as an ai_response, and return the
 * relevant objects. Expects data.business and data.ai_prompt ids.
 * Fix: guards the JOE.Data lookups — `.find` returns undefined for unknown
 * ids, which previously crashed on the subsequent property access.
 */
this.sendInitialConsultTranscript = async function (data, req, res) {
  coloredLog("sendInitialConsultTranscript");
  try {
    var payload = {
      params: req.params,
      data: data
    };
  } catch (e) {
    return { errors: 'plugin error: ' + e, failedat: 'plugin' };
  }
  var businessOBJ = (JOE.Data.business || []).find(b => b._id == data.business);
  var promptOBJ = (JOE.Data.ai_prompt || []).find(p => p._id == data.ai_prompt);
  if (!businessOBJ) {
    return res.jsonp({ error: 'Business not found: ' + data.business });
  }
  if (!promptOBJ) {
    return res.jsonp({ error: 'Prompt not found: ' + data.ai_prompt });
  }

  // See if there is an initial_transcript_url property on that object
  const transcriptUrl = businessOBJ.initial_transcript_url;
  if (!transcriptUrl) {
    return res.jsonp({ error: 'No initial transcript URL found' });
  }

  // Get the content of the file from Google Docs
  const transcriptContent = await getGoogleDocContent(transcriptUrl);
  if (!transcriptContent || transcriptContent.error) {
    return res.jsonp({ error: (transcriptContent.error && transcriptContent.error.message) || 'Failed to fetch transcript content' });
  }
  const tokenCount = countTokens(`${promptOBJ.template}\n\n${transcriptContent}`);
  payload.tokenCount = tokenCount;
  coloredLog("token count: " + tokenCount);

  // Send the content to ChatGPT, with the template property of the prompt
  // object. Uses the shared client factory for consistency with other paths.
  const client = newClient();
  const chatResponse = await client.chat.completions.create({
    messages: [{ role: 'user', content: `${promptOBJ.template}\n\n${transcriptContent}` }],
    model: 'gpt-4o',
  });

  // Get the response
  const chatContent = chatResponse.choices[0].message.content;
  const responseName = `${businessOBJ.name} - ${promptOBJ.name}`;
  // Save the response
  await saveAIResponse({
    name: responseName,
    business: data.business,
    ai_prompt: data.ai_prompt,
    response: chatContent,
    payload,
    prompt_method: req.params.method
  }, req && req.User);
  coloredLog("response saved -" + responseName);
  return {
    payload,
    businessOBJ,
    promptOBJ,
    chatContent,
    responseName
  };
}
744
+
745
/**
 * Fetch a Google Doc's plain-text content via the Docs API, authenticating
 * with the module-level service-account auth (`google_auth`).
 * Transcript-specific scrubbing: occurrences of "Euron Nicholson" become
 * "[EN]" and WebVTT-style timestamp ranges become "-ts-" to shrink payloads.
 * Fix: removed dead code — an unused second GoogleAuth instance and an
 * unused GOOGLE_DOCS_API_KEY settings read were created on every call.
 * @param {string} docUrl - full Google Docs URL
 * @returns {Promise<string|{error: Error}>} doc text, or {error} on failure
 */
async function getGoogleDocContent(docUrl) {
  try {
    const docs = google.docs({ version: 'v1', auth: google_auth });
    const docId = extractDocIdFromUrl(docUrl);
    const doc = await docs.documents.get({ documentId: docId });

    let content = doc.data.body.content.map(element => {
      if (element.paragraph && element.paragraph.elements) {
        return element.paragraph.elements.map(
          e => e.textRun ? e.textRun.content.replace(/Euron Nicholson/g, '[EN]').replace(/\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}/g, '-ts-')
            : ''
        ).join('');
      }
      return '';
    }).join('\n');

    return content;
  } catch (error) {
    console.error('Error fetching Google Doc content:', error);
    return { error };
  }
}
775
/**
 * Count (or approximate) the tokens in `text` for the given model.
 * Uses tiktoken's `encoding_for_model` when that function is in scope;
 * otherwise falls back to the common ~4-characters-per-token heuristic.
 * Fix: the top of this module does not require tiktoken, so calling
 * `encoding_for_model` unconditionally threw a ReferenceError whenever the
 * host environment did not provide it — TODO confirm whether the import
 * exists elsewhere in this file; the guard is safe either way.
 */
function countTokens(text, model = 'gpt-4o') {
  const s = String(text == null ? '' : text);
  if (typeof encoding_for_model === 'function') {
    const enc = encoding_for_model(model);
    try {
      return enc.encode(s).length;
    } finally {
      // tiktoken encoders hold WASM memory; release it when supported
      if (typeof enc.free === 'function') enc.free();
    }
  }
  // Heuristic fallback: roughly 4 characters per token.
  return Math.ceil(s.length / 4);
}
780
/**
 * Pull the document id out of a Google Docs URL (the segment after "/d/").
 * Returns null when the URL does not contain one.
 */
function extractDocIdFromUrl(url) {
  const idPattern = /\/d\/([a-zA-Z0-9-_]+)/;
  const matched = url.match(idPattern);
  return matched ? matched[1] : null;
}
783
+ }
784
+
785
/**
 * Persist an ai_response record via JOE.Storage.
 * Creator fields are filled from `user` when it carries an _id. Save errors
 * are logged (best-effort) rather than thrown to the caller.
 * NOTE(review): relies on a global `cuid()` that is not required at the top
 * of this module — confirm the host environment provides it.
 */
async function saveAIResponse(data, user) {
  try {
    let creator_type = null;
    let creator_id = null;
    if (user && user._id) {
      creator_type = 'user';
      creator_id = user._id;
    }
    const record = {
      name: data.name,
      itemtype: 'ai_response',
      business: data.business,
      ai_prompt: data.ai_prompt,
      response: data.response,
      payload: data.payload,
      prompt_method: data.prompt_method,
      created: new Date().toISOString(),
      _id: cuid(),
      creator_type,
      creator_id
      // Add any other fields you want to save
    };
    await new Promise((resolve, reject) => {
      JOE.Storage.save(record, 'ai_response', (err, result) => {
        if (err) {
          coloredLog('Error saving AI response: ' + err);
          reject(err);
          return;
        }
        coloredLog('AI response saved successfully');
        resolve(result);
      });
    });
  } catch (error) {
    coloredLog('Error in saveAIResponse: ' + error);
  }
}
824
+
825
// Normalize model output that is supposed to contain JSON. Models frequently
// wrap JSON in markdown fences (```json ... ```) and surround it with prose;
// this strips any fence and isolates the outermost {...}/[...] span so a
// subsequent JSON.parse has the best chance of succeeding.
function extractJsonText(raw) {
  if (!raw) {
    return '';
  }
  let text = String(raw).trim();

  // Prefer the contents of a fenced block when one is present.
  const jsonFenceAt = text.indexOf('```json');
  const fenceAt = jsonFenceAt !== -1 ? jsonFenceAt : text.indexOf('```');
  if (fenceAt !== -1) {
    const newlineAfterFence = text.indexOf('\n', fenceAt);
    text = newlineAfterFence !== -1
      ? text.substring(newlineAfterFence + 1)
      : text.substring(fenceAt + 3);
    const closingFence = text.lastIndexOf('```');
    if (closingFence !== -1) {
      text = text.substring(0, closingFence);
    }
    text = text.trim();
  }

  // If prose surrounds the JSON, slice from the first {/[ to the last }/].
  if (text[0] !== '{' && text[0] !== '[') {
    const braceAt = text.indexOf('{');
    const bracketAt = text.indexOf('[');
    let openAt;
    if (braceAt === -1) {
      openAt = bracketAt;
    } else if (bracketAt === -1) {
      openAt = braceAt;
    } else {
      openAt = Math.min(braceAt, bracketAt);
    }
    const closeAt = Math.max(text.lastIndexOf('}'), text.lastIndexOf(']'));
    if (openAt !== -1 && closeAt !== -1 && closeAt > openAt) {
      text = text.slice(openAt, closeAt + 1);
    }
  }
  return text.trim();
}
863
+
864
// Autofill feature (Responses API; supports assistant_id or model)
/**
 * Suggest values for specific fields of a JOE object.
 * Accepts: object_id/_id (or a full `object`), schema (or derived from the
 * object's itemtype), fields/field, optional prompt, model, assistant_id,
 * MCP options (mcp_enabled/mcp_toolset/mcp_selected_tools/
 * mcp_instructions_mode), allow_web, and save_history/save_itemtype.
 * Returns { success, patch, model, usage, saved, saved_item, elapsed_ms }.
 * `$J.get` and `getSchemaDef` are project helpers — presumably object lookup
 * and schema summary; confirm against their definitions.
 */
this.autofill = async function (data, req, res) {
  const startedAt = Date.now();
  try {
    const body = data || {};
    const objectId = body.object_id || body._id;
    const object = body.object || $J.get(objectId);
    const schemaName = body.schema || (object && object.itemtype) || body.itemtype;
    const { full: schemaFull, summary: schemaSummary } = getSchemaDef(schemaName);
    const rawFields = body.fields || body.field;
    // Accept either a single field name or an array of them.
    const fields = Array.isArray(rawFields) ? rawFields : (rawFields ? [rawFields] : []);
    const userPrompt = body.prompt || '';
    const assistantId = body.assistant_id || null;

    // Input validation: fail fast with a machine-readable code.
    if (!object) {
      return { success: false, error: 'Object not found', code: 'OBJECT_NOT_FOUND' };
    }
    if (!schemaName) {
      return { success: false, error: 'Schema name not determined', code: 'SCHEMA_REQUIRED' };
    }
    if (!fields.length) {
      return { success: false, error: 'No fields specified', code: 'FIELDS_REQUIRED' };
    }

    const flattened = JOE.Utils.flattenObject(object._id);
    // NOTE(review): joined with '\\n' — a literal backslash-n, not a newline;
    // confirm whether a real newline join ('\n') was intended here.
    const systemText = [
      'You are JOE (Json Object Editor) assistant.',
      'Task: Populate only the requested fields according to the provided schema context and JOE conventions.',
      '- Respect field types (text, number, arrays, enums, references).',
      '- Do NOT invent IDs for reference fields; only return human text for text-like fields.',
      '- If a field is an enum, choose the closest valid enum. If unsure, omit it from patch.',
      '- If a field is an array, return an array of values.',
      '- Never modify unrelated fields.',
      '- Output MUST be strict JSON with a top-level key "patch" containing only populated fields.',
      '- If you lack sufficient information, return an empty patch.'
    ].join('\\n');

    const schemaForContext = schemaSummary || schemaFull || {};
    const userInput = JSON.stringify({
      action: 'autofill_fields',
      target_schema: schemaName,
      requested_fields: fields,
      user_prompt: userPrompt,
      object_context: flattened,
      schema_context: schemaForContext
    }, null, ' ');

    const openai = newClient();
    const model = body.model || 'gpt-4o-mini';////'gpt-5-nano';

    // Normalize MCP options for autofill. By default, when mcp_enabled is
    // true we expose the read-only toolset, which is safe for field
    // suggestions. Callers can override toolset / selected tools.
    const mcpEnabled = !!body.mcp_enabled;
    const mcpToolset = body.mcp_toolset || 'read-only';
    const mcpSelected = Array.isArray(body.mcp_selected_tools) ? body.mcp_selected_tools : null;
    const mcpInstructionsMode = body.mcp_instructions_mode || 'auto';

    let response;
    let mcpToolCalls = [];
    if (mcpEnabled) {
      // MCP path: delegate to runWithTools, which handles the tool-call
      // round-trip and a token-limit retry.
      const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
      const toolsForModel = MCP.getToolDefinitions(toolNames);
      const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
      const systemTextWithMcp = [systemText, mcpText || ''].join('\n').trim();

      const messages = [{ role:'user', content:userInput }];

      const runResult = await runWithTools({
        openai: openai,
        model: model,
        systemText: systemTextWithMcp,
        messages: messages,
        assistant: { tools: toolsForModel },
        req: req
      });
      response = runResult.response;
      // Record which tools the model actually invoked (for the saved record).
      if (runResult && Array.isArray(runResult.toolCalls)) {
        mcpToolCalls = runResult.toolCalls.map(function(tc){
          return {
            name: tc && (tc.name || tc.function_name || tc.tool_name),
            arguments: tc && tc.arguments
          };
        }).filter(function(x){ return x && x.name; });
      }
    } else {
      // For simplicity and robustness, use plain text output and instruct the
      // model to return a strict JSON object. We previously attempted the
      // Responses `json_schema` response_format, but the SDK shape can change
      // and is harder to parse reliably; text + JSON.parse is sufficient here.
      const requestBase = {
        temperature: 0.2,
        instructions: systemText,
        input: userInput
      };
      // Optional web_search tool: if the caller sets allow_web truthy, expose
      // the built-in web_search capability and let the model decide when to
      // call it.
      if (body.allow_web) {
        coloredLog("allowing web search");
        requestBase.tools = [{ type: 'web_search' }];
        requestBase.tool_choice = 'auto';
      }

      if (assistantId) {
        response = await openai.responses.create({ assistant_id: assistantId, ...requestBase });
      } else {
        response = await openai.responses.create({ model, ...requestBase });
      }
    }

    // Extract the model's text: prefer output_text, then fall back to
    // scanning response.output for the first message text part.
    let textOut = '';
    try { textOut = response.output_text || ''; } catch (_e) {}
    coloredLog("textOut: "+textOut);
    if (!textOut && response && Array.isArray(response.output)) {
      for (let i = 0; i < response.output.length; i++) {
        const item = response.output[i];
        if (item && item.type === 'message' && item.content && Array.isArray(item.content)) {
          const textPart = item.content.find(function (c) { return c.type === 'output_text' || c.type === 'text'; });
          if (textPart && (textPart.text || textPart.output_text)) {
            textOut = textPart.text || textPart.output_text;
            break;
          }
        }
      }
    }

    // Parse the JSON "patch" out of the (possibly fenced) model text.
    let patch = {};
    try {
      const jsonText = extractJsonText(textOut);
      const parsed = JSON.parse(jsonText || '{}');
      patch = parsed.patch || {};
    } catch (_e) {
      console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
    }
    coloredLog("patch: "+JSON.stringify(patch));
    // Only keep keys the caller actually asked for.
    const filteredPatch = {};
    fields.forEach(function (f) {
      if (Object.prototype.hasOwnProperty.call(patch, f)) {
        filteredPatch[f] = patch[f];
      }
    });
    // If we got no fields back on the first attempt, retry once before
    // giving up. Avoid infinite loops by marking a retry flag.
    if (!Object.keys(filteredPatch).length && !body._retry) {
      coloredLog('[autofill] empty patch, retrying once');
      const retryBody = Object.assign({}, body, { _retry: true });
      return await self.autofill(retryBody, req, res);
    }

    // Optional save
    let savedItem = null;
    if (body.save_history || body.save_itemtype) {
      const targetItemtype = body.save_itemtype || 'ai_response';
      // Only save into itemtypes the schema registry knows about.
      if (JOE.Schemas && JOE.Schemas.schema && JOE.Schemas.schema[targetItemtype]) {
        const isAiResponse = (targetItemtype === 'ai_response');
        const toolNamesForSave = mcpEnabled ? MCP.getToolNamesForToolset(mcpToolset, mcpSelected) : [];
        const baseSave = {
          itemtype: targetItemtype,
          name: `[${schemaName}] autofill ${fields.join(', ')}`,
          object_id: object._id,
          target_schema: schemaName,
          fields,
          prompt: userPrompt,
          patch: filteredPatch,
          model,
          raw: { response, mcp_tools_used: mcpToolCalls }
        };
        // Extra MCP bookkeeping fields only exist on the ai_response schema.
        if (isAiResponse) {
          baseSave.mcp_enabled = mcpEnabled;
          baseSave.mcp_toolset = mcpToolset;
          baseSave.mcp_selected_tools = toolNamesForSave;
          baseSave.mcp_instructions_mode = mcpInstructionsMode;
          baseSave.mcp_tools_used = mcpToolCalls;
        }
        await new Promise(function (resolve) {
          // Save errors are intentionally ignored; the patch is still returned.
          JOE.Storage.save(baseSave, targetItemtype, function (_err, saved) {
            savedItem = saved || null;
            resolve();
          });
        });
      }
    }

    return {
      success: true,
      patch: filteredPatch,
      model,
      usage: response && response.usage,
      saved: !!savedItem,
      saved_item: savedItem,
      elapsed_ms: Date.now() - startedAt
    };
  } catch (e) {
    return { success: false, error: e && e.message || 'Unknown error' };
  }
};
1061
+
1062
/**
 * Echo-style stub endpoint: validates that a prompt was supplied,
 * writes a simulated ChatGPT reply straight to the HTTP response via
 * res.jsonp, and tells the plugin pipeline the callback was already
 * consumed ({ use_callback: true }). No OpenAI call is made here.
 */
this.getResponse = function (data, req, res) {
	try {
		const promptText = data.prompt;
		if (!promptText) {
			return { error: 'No prompt provided' };
		}
		// Simulated reply only — useful for wiring/plumbing tests.
		const simulated = `ChatGPT response to: ${promptText}`;
		res.jsonp({ response: simulated });
		return { use_callback: true };
	} catch (e) {
		return { errors: 'plugin error: ' + e, failedat: 'plugin' };
	}
};
1077
+
1078
/**
 * Render the default plugin payload as a JSON string.
 * The third JSON.stringify argument is used as the per-level indent
 * string (tab + CRLF + " <br/>"), producing HTML-ish line breaks in
 * the serialized output.
 */
this.html = function (data, req, res) {
	const payload = self.default(data, req);
	return JSON.stringify(payload, '', '\t\r\n <br/>');
};
1081
/* NEW AI RESPONSE API */
1082
+
1083
/**
 * executeJOEAiPrompt
 *
 * Execute a stored `ai_prompt` record end-to-end: load the prompt,
 * resolve any referenced content objects, run an optional helper-function
 * module to shape instructions/input, attach uploaded-file metadata,
 * call OpenAI (Responses API directly, or via runWithTools when MCP is
 * enabled), and persist the result as an `ai_response`.
 *
 * Inputs (data):
 *  - ai_prompt: cuid of the ai_prompt record (required)
 *  - params: named parameters referenced by the prompt's content_items
 *    (also accepted flat on `data` itself)
 *  - openai_file_ids: optional OpenAI file ids to attach
 *  - ai_assistant_id: optional assistant attribution for the saved response
 *
 * Output:
 *  - { success, ai_response_id, response, usage } or { error, message }
 */
this.executeJOEAiPrompt = async function(data, req, res) {
	const referencedObjectIds = []; // Track all objects touched during helper function
	try {
		const promptId = data.ai_prompt;
		// Support both payload shapes: { ai_prompt, params:{...}, ... } and flat
		const params = (data && (data.params || data)) || {};

		if (!promptId) {
			return { error: "Missing prompt_id." };
		}

		const prompt = await $J.get(promptId); // Use $J.get for consistency
		if (!prompt) {
			return { error: "Prompt not found." };
		}

		let instructions = prompt.instructions || "";
		let finalInstructions = instructions;
		let finalInput = '';
		// Pre-load all content_objects if content_items exist
		const contentObjects = {};

		if (prompt.content_items && Array.isArray(prompt.content_items)) {
			for (const content of prompt.content_items) {
				if (params[content.reference]) {
					// FIX: await the lookup for consistency with the awaited
					// $J.get(promptId) above. If $J.get is async, the previous
					// un-awaited call stored a pending Promise in contentObjects,
					// silently breaking the uploaded-files scan below. Awaiting
					// a plain value is a harmless no-op if $J.get is sync.
					const obj = await $J.get(params[content.reference]);
					if (obj) {
						contentObjects[content.itemtype] = obj;

						// Pre-track referenced object
						if (obj._id && !referencedObjectIds.includes(obj._id)) {
							referencedObjectIds.push(obj._id);
						}
					}
				}
			}
		}

		// Execute any helper functions if present
		if (prompt.functions) {
			const modFunc = JOE.Utils.requireFromString(prompt.functions, prompt._id);
			const helperResult = await modFunc({
				instructions,
				params,
				ai_prompt: prompt,
				content_objects: contentObjects,
				trackObject: (obj) => {
					if (obj?._id && !referencedObjectIds.includes(obj._id)) {
						referencedObjectIds.push(obj._id);
					}
				}
			});

			if (typeof helperResult === 'object' && helperResult.error) {
				return { error: helperResult.error };
			}

			// Assume the result is { instructions, input }
			finalInstructions = helperResult.instructions || instructions;
			finalInput = helperResult.input;
		}

		// Build a compact uploaded_files header from any referenced objects that
		// have uploader-style files with OpenAI ids. This gives the model
		// explicit metadata about which files were attached and their roles so
		// prompts (like MCP Tokenize Client) can reason about "transcript"
		// vs "summary" sources instead of guessing from content alone.
		let uploadedFilesMeta = [];
		try{
			Object.keys(contentObjects || {}).forEach(function(itemtype){
				const obj = contentObjects[itemtype];
				if (!obj || typeof obj !== 'object') { return; }
				Object.keys(obj).forEach(function(field){
					const val = obj[field];
					if (!Array.isArray(val)) { return; }
					val.forEach(function(f){
						if (f && f.openai_file_id) {
							uploadedFilesMeta.push({
								itemtype: itemtype,
								field: field,
								name: f.filename || '',
								role: f.file_role || null,
								openai_file_id: f.openai_file_id
							});
						}
					});
				});
			});
		}catch(_e){ /* best-effort only */ }
		if (uploadedFilesMeta.length) {
			try{
				const header = { uploaded_files: uploadedFilesMeta };
				if (finalInput && String(finalInput).trim().length) {
					finalInput = JSON.stringify({
						uploaded_files: uploadedFilesMeta,
						input: finalInput
					}, null, 2);
				} else {
					finalInput = JSON.stringify(header, null, 2);
				}
			}catch(_e){ /* if JSON.stringify fails, leave finalInput as-is */ }
		}

		const openai = newClient(); // however your OpenAI client is created

		// Normalize MCP options from the ai_prompt record.
		const mcpEnabled = !!prompt.mcp_enabled;
		const mcpToolset = prompt.mcp_toolset || 'read-only';
		const mcpSelected = Array.isArray(prompt.mcp_selected_tools) ? prompt.mcp_selected_tools : null;
		const mcpInstructionsMode = prompt.mcp_instructions_mode || 'auto';

		// If MCP is enabled, prefer Responses+tools via runWithTools. Otherwise,
		// keep the existing single-call Responses behavior using prompt.tools.
		let response;
		let resolvedToolNames = null;
		let mcpToolCalls = [];
		if (mcpEnabled) {
			// Determine tool names from the configured toolset + overrides.
			const toolNames = MCP.getToolNamesForToolset(mcpToolset, mcpSelected);
			resolvedToolNames = toolNames;
			const toolsForModel = MCP.getToolDefinitions(toolNames);

			// Build per-tool MCP instructions (short) and append to the existing instructions.
			const mcpText = MCP.buildToolInstructions(toolNames, mcpInstructionsMode);
			const systemText = [finalInstructions || instructions || '']
				.concat(mcpText ? ['\n', mcpText] : [])
				.join('\n')
				.trim();

			const messages = [];
			if (finalInput && String(finalInput).trim().length) {
				messages.push({ role:'user', content:String(finalInput) });
			}
			// Ensure the Responses API always has some input when MCP is enabled.
			// For prompts that rely purely on system instructions, synthesize a
			// minimal user turn so the call remains valid.
			if (!messages.length) {
				messages.push({
					role: 'user',
					content: 'Follow the system instructions above and produce the requested output.'
				});
			}

			const runResult = await runWithTools({
				openai: openai,
				model: prompt.ai_model || "gpt-4o",
				systemText: systemText,
				messages: messages,
				// Provide a synthetic assistant-style object so runWithTools can
				// normalize tools into Responses format.
				assistant: { tools: toolsForModel },
				// Pass through attachments so MCP runs see the same files as
				// non‑MCP prompts (direct or file_search modes).
				attachments_mode: prompt.attachments_mode || 'direct',
				openai_file_ids: Array.isArray(data.openai_file_ids) ? data.openai_file_ids : null,
				req: req
			});
			response = runResult.response;
			if (runResult && Array.isArray(runResult.toolCalls)) {
				mcpToolCalls = runResult.toolCalls.map(function(tc){
					return {
						name: tc && (tc.name || tc.function_name || tc.tool_name),
						arguments: tc && tc.arguments
					};
				}).filter(function(x){ return x && x.name; });
			}
		} else {
			const payloadBase = {
				model: prompt.ai_model || "gpt-4o",
				instructions: finalInstructions || instructions, // string only
				input: finalInput || '',
				tools: prompt.tools || [{ "type": "web_search" }],
				tool_choice: prompt.tool_choice || "auto",
				// temperature may be stored as a string on the record; coerce it.
				temperature: prompt.temperature ? parseFloat(prompt.temperature) : 0.7
			};
			coloredLog(`${payloadBase.model} and ${payloadBase.temperature}`);
			const mode = (prompt.attachments_mode || 'direct');
			let payload = payloadBase;
			if (Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
				try{
					payload = await attachFilesToResponsesPayload(openai, payloadBase, {
						attachments_mode: mode,
						openai_file_ids: data.openai_file_ids
					});
				}catch(e){
					console.warn('[chatgpt] attachFilesToResponsesPayload failed; continuing without attachments', e && e.message || e);
				}
			}
			response = await safeResponsesCreate(openai, payload);
		}

		// Persist the run as an ai_response (includes MCP metadata when used).
		const saved = await saveAiResponseRefactor({
			prompt,
			ai_response_content: response.output_text || "",
			user_prompt: finalInput || '',
			params,
			referenced_object_ids: referencedObjectIds,
			response_id: response.id,
			usage: response.usage || {},
			user: req && req.User,
			ai_assistant_id: data.ai_assistant_id,
			mcp_enabled: mcpEnabled,
			mcp_toolset: mcpToolset,
			mcp_selected_tools: resolvedToolNames || (Array.isArray(mcpSelected) ? mcpSelected : []),
			mcp_instructions_mode: mcpInstructionsMode,
			mcp_tools_used: mcpToolCalls
		});
		// Best-effort: record which OpenAI file ids were used on the saved record.
		try{
			if (saved && Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
				saved.used_openai_file_ids = data.openai_file_ids.slice(0,10);
				await new Promise(function(resolve){
					JOE.Storage.save(saved,'ai_response',function(){ resolve(); },{ user: req && req.User, history:false });
				});
			}
		}catch(_e){}

		return { success: true, ai_response_id: saved._id, response: response.output_text || "", usage: response.usage };
	} catch (e) {
		console.error('❌ executeJOEAiPrompt error:', e);
		return { error: "Failed to execute AI prompt.", message: e.message };
	}
};
1312
+
1313
/**
 * Build a chat.completions-style request payload from an ai_prompt record.
 * (Legacy helper: the live path above now uses the Responses API; the
 * original call site is commented out.)
 *
 * @param {Object} prompt ai_prompt record (model, tools, temperature, max_tokens)
 * @param {Object} params unused here; kept for call-site symmetry
 * @param {string} instructions system message text
 * @param {string} user_prompt user message text (defaults to "")
 * @returns {Object} payload for openai.chat.completions.create
 */
function createResponsePayload(prompt, params, instructions, user_prompt) {
	const systemMsg = { role: "system", content: instructions };
	const userMsg = { role: "user", content: user_prompt || "" };
	const payload = {
		model: prompt.model || "gpt-4o",
		messages: [systemMsg, userMsg],
		tools: prompt.tools || undefined,
		tool_choice: prompt.tool_choice || "auto",
		// ?? (not ||) so an explicit 0 temperature / max_tokens is honored.
		temperature: prompt.temperature ?? 0.7,
		max_tokens: prompt.max_tokens ?? 1200
	};
	return payload;
}
1326
/**
 * Persist an `ai_response` record summarizing a completed prompt run.
 *
 * @param {Object} args destructured:
 *  - prompt: the ai_prompt record that was executed
 *  - ai_response_content: raw model output text
 *  - user_prompt, params, referenced_object_ids, response_id, usage
 *  - user / ai_assistant_id: used to attribute the creator
 *  - mcp_*: MCP run metadata (only attached when mcp_enabled is truthy)
 * @returns {Object} the saved aiResponse object (with generated _id)
 * @throws when JOE.Storage.save reports an error
 */
async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids, response_id, usage, user, ai_assistant_id, mcp_enabled, mcp_toolset, mcp_selected_tools, mcp_instructions_mode, mcp_tools_used }) {
	// Best-effort parse of the model output into JSON for downstream agents
	// (Thought pipeline, etc.).
	// FIX: the raw content was previously JSON.parse'd a *second* time just to
	// collect top-level keys, which logged a console.error for every plain-text
	// or empty response and missed keys inside fenced JSON that extractJsonText
	// recovers. Parse once and derive the keys from the parsed object.
	// NOTE(review): assumes extractJsonText returns the JSON text unchanged for
	// plain-JSON input — confirm against its definition elsewhere in this file.
	let parsedResponse = null;
	try {
		const jt = extractJsonText(ai_response_content);
		if (jt) {
			parsedResponse = JSON.parse(jt);
		}
	} catch (_e) {
		parsedResponse = null;
	}
	var response_keys = [];
	if (parsedResponse && typeof parsedResponse === 'object' && !Array.isArray(parsedResponse)) {
		response_keys = Object.keys(parsedResponse);
	}
	// Attribute the response to an assistant when one drove the run,
	// otherwise to the acting user (when known).
	var creator_type = null;
	var creator_id = null;
	try{
		if (ai_assistant_id){
			creator_type = 'ai_assistant';
			creator_id = ai_assistant_id;
		} else if (user && user._id){
			creator_type = 'user';
			creator_id = user._id;
		}
	}catch(_e){}
	const aiResponse = {
		name: `${prompt.name}`,
		itemtype: 'ai_response',
		ai_prompt: prompt._id,
		prompt_name: prompt.name,
		prompt_method: prompt.prompt_method,
		response: ai_response_content,
		response_json: parsedResponse,
		response_keys: response_keys,
		response_id: response_id || '',
		user_prompt: user_prompt,
		params_used: params,
		usage: usage || {},
		tags: prompt.tags || [],
		model_used: prompt.ai_model || "gpt-4o",
		referenced_objects: referenced_object_ids, // new flexible array of referenced object ids
		created: (new Date).toISOString(),
		_id: cuid(),
		creator_type: creator_type,
		creator_id: creator_id
	};
	// Only attach MCP metadata when MCP was actually enabled for this run, to
	// avoid introducing nulls into history diffs.
	try{
		if (mcp_enabled) {
			aiResponse.mcp_enabled = true;
			if (mcp_toolset) { aiResponse.mcp_toolset = mcp_toolset; }
			if (Array.isArray(mcp_selected_tools) && mcp_selected_tools.length) {
				aiResponse.mcp_selected_tools = mcp_selected_tools;
			}
			if (mcp_instructions_mode) {
				aiResponse.mcp_instructions_mode = mcp_instructions_mode;
			}
			if (Array.isArray(mcp_tools_used) && mcp_tools_used.length) {
				aiResponse.mcp_tools_used = mcp_tools_used;
			}
		}
	}catch(_e){}

	// Persist and surface storage failures to the caller via rejection.
	await new Promise((resolve, reject) => {
		JOE.Storage.save(aiResponse, 'ai_response', function(err, result) {
			if (err) {
				console.error('❌ Error saving AI response:', err);
				reject(err);
			} else {
				console.log('✅ AI response saved successfully');
				resolve(result);
			}
		});
	});

	return aiResponse;
}
1407
+
1408
+ // ---------- Widget chat endpoints (Responses API + optional assistants) ----------
1409
+ function normalizeMessages(messages) {
1410
+ if (!Array.isArray(messages)) { return []; }
1411
+ return messages.map(function (m) {
1412
+ return {
1413
+ role: m.role || 'assistant',
1414
+ content: m.content || '',
1415
+ created_at: m.created_at || m.created || new Date().toISOString()
1416
+ };
1417
+ });
1418
+ }
1419
+
1420
+ /**
1421
+ * widgetStart
1422
+ *
1423
+ * Purpose:
1424
+ * Create and persist a new `ai_widget_conversation` record for the
1425
+ * external `<joe-ai-widget>` chat component. This is a lightweight
1426
+ * conversation record that stores model, assistant, system text and
1427
+ * messages for the widget.
1428
+ *
1429
+ * Inputs (data):
1430
+ * - model (optional) override model for the widget
1431
+ * - ai_assistant_id (optional) JOE ai_assistant cuid
1432
+ * - system (optional) explicit system text
1433
+ * - source (optional) freeform source tag, defaults to "widget"
1434
+ *
1435
+ * OpenAI calls:
1436
+ * - None. This endpoint only touches storage.
1437
+ *
1438
+ * Output:
1439
+ * - { success, conversation_id, model, assistant_id }
1440
+ * where assistant_id is the OpenAI assistant_id (if present).
1441
+ */
1442
/**
 * widgetStart: create and persist a new ai_widget_conversation record
 * for the <joe-ai-widget> chat component. Storage only — no OpenAI call.
 * Returns { success, conversation_id, model, assistant_id,
 * assistant_color, user_color }.
 */
this.widgetStart = async function (data, req, res) {
	try {
		var body = data || {};
		// Default to a modern chat model when no assistant/model is provided.
		// If an assistant is supplied, its ai_model will override this.
		var model = body.model || "gpt-5.1";
		// FIX: await the assistant lookup. This function is already async and
		// the sibling executeJOEAiPrompt awaits $J.get; without the await, an
		// async $J.get would leave `assistant` as a Promise and every
		// assistant.* read below would be undefined. Awaiting a plain value
		// is a no-op if $J.get is synchronous.
		var assistant = body.ai_assistant_id ? await $J.get(body.ai_assistant_id) : null;
		var system = body.system || (assistant && assistant.instructions) || "";
		// Prefer explicit user fields coming from the client (ai-widget-test page
		// passes _joe.User fields). Widget endpoints no longer infer from req.User
		// to keep a single, explicit source of truth.
		var user = null;
		if (body.user_id || body.user_name || body.user_color) {
			user = {
				_id: body.user_id,
				name: body.user_name,
				fullname: body.user_name,
				color: body.user_color
			};
		}
		var user_color = (body.user_color) || (user && user.color) || null;

		var convo = {
			_id: (typeof cuid === 'function') ? cuid() : undefined,
			itemtype: "ai_widget_conversation",
			model: (assistant && assistant.ai_model) || model,
			assistant: assistant && assistant._id,
			assistant_id: assistant && assistant.assistant_id,
			assistant_color: assistant && assistant.assistant_color,
			user: user && user._id,
			user_name: user && (user.fullname || user.name),
			user_color: user_color,
			system: system,
			messages: [],
			source: body.source || "widget",
			// Optional scope for object-scoped widget chats
			scope_itemtype: body.scope_itemtype || null,
			scope_id: body.scope_id || null,
			created: new Date().toISOString(),
			joeUpdated: new Date().toISOString()
		};
		if (body.name && !convo.name) {
			convo.name = String(body.name);
		}

		const saved = await new Promise(function (resolve, reject) {
			// Widget conversations are lightweight and do not need full history diffs.
			JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
				if (err) return reject(err);
				resolve(result);
			}, { history: false });
		});

		return {
			success: true,
			conversation_id: saved._id,
			model: saved.model,
			assistant_id: saved.assistant_id || null,
			assistant_color: saved.assistant_color || null,
			user_color: saved.user_color || user_color || null
		};
	} catch (e) {
		console.error("[chatgpt] widgetStart error:", e);
		return { success: false, error: e && e.message || "Unknown error" };
	}
};
1508
+
1509
+ /**
1510
+ * widgetHistory
1511
+ *
1512
+ * Purpose:
1513
+ * Load an existing `ai_widget_conversation` and normalize its
1514
+ * messages for use by `<joe-ai-widget>` on page load or refresh.
1515
+ *
1516
+ * Inputs (data):
1517
+ * - conversation_id or _id: the widget conversation cuid
1518
+ *
1519
+ * OpenAI calls:
1520
+ * - None. Purely storage + normalization.
1521
+ *
1522
+ * Output:
1523
+ * - { success, conversation_id, model, assistant_id, messages }
1524
+ */
1525
/**
 * widgetHistory: fetch a stored ai_widget_conversation by id and return
 * its normalized message list for the <joe-ai-widget> client.
 * Storage + normalization only — no OpenAI calls.
 */
this.widgetHistory = async function (data, req, res) {
	try {
		const convoId = data.conversation_id || data._id;
		if (!convoId) {
			return { success: false, error: "Missing conversation_id" };
		}
		// Load the conversation record; resolve with the first match.
		const record = await new Promise(function (resolve, reject) {
			JOE.Storage.load("ai_widget_conversation", { _id: convoId }, function (err, results) {
				if (err) { return reject(err); }
				resolve(results && results[0]);
			});
		});
		if (!record) {
			return { success: false, error: "Conversation not found" };
		}

		record.messages = normalizeMessages(record.messages);
		return {
			success: true,
			conversation_id: record._id,
			model: record.model,
			assistant_id: record.assistant_id || null,
			assistant_color: record.assistant_color || null,
			user_color: record.user_color || null,
			messages: record.messages
		};
	} catch (e) {
		console.error("[chatgpt] widgetHistory error:", e);
		return { success: false, error: e && e.message || "Unknown error" };
	}
};
1556
+
1557
+ /**
1558
+ * widgetMessage
1559
+ *
1560
+ * Purpose:
1561
+ * Handle a single user turn for `<joe-ai-widget>`:
1562
+ * - Append the user message to the stored conversation.
1563
+ * - Call OpenAI Responses (optionally with tools from the selected
1564
+ * `ai_assistant`, via runWithTools + MCP).
1565
+ * - Append the assistant reply, persist the conversation, and return
1566
+ * the full message history plus the latest assistant message.
1567
+ *
1568
+ * Inputs (data):
1569
+ * - conversation_id or _id: cuid of the widget conversation
1570
+ * - content: user text
1571
+ * - role: user role, defaults to "user"
1572
+ * - assistant_id: optional OpenAI assistant_id (used only to
1573
+ * locate the JOE ai_assistant config)
1574
+ * - model: optional model override
1575
+ *
1576
+ * OpenAI calls:
1577
+ * - responses.create (once if no tools; twice when tools are present):
1578
+ * * First call may include tools (assistant.tools) and `tool_choice:"auto"`.
1579
+ * * Any tool calls are executed via MCP and injected as `tool` messages.
1580
+ * * Second call is plain Responses with updated messages.
1581
+ *
1582
+ * Output:
1583
+ * - { success, conversation_id, model, assistant_id, messages,
1584
+ * last_message, usage }
1585
+ */
1586
this.widgetMessage = async function (data, req, res) {
	try {
		// Validate and unpack the incoming turn.
		var body = data || {};
		var conversation_id = body.conversation_id || body._id;
		var content = body.content;
		var role = body.role || "user";

		if (!conversation_id || !content) {
			return { success: false, error: "Missing conversation_id or content" };
		}

		// Load the conversation record; resolve with the first match.
		const convo = await new Promise(function (resolve, reject) {
			JOE.Storage.load("ai_widget_conversation", { _id: conversation_id }, function (err, results) {
				if (err) return reject(err);
				resolve(results && results[0]);
			});
		});
		if (!convo) {
			return { success: false, error: "Conversation not found" };
		}

		// Best-effort: if this is an object-scoped conversation and we have
		// not yet attached any files, walk the scoped object for uploader
		// style files that have OpenAI ids and cache them on the convo.
		try{
			if ((!convo.attached_openai_file_ids || !convo.attached_openai_file_ids.length) &&
					convo.scope_itemtype && convo.scope_id) {
				var scopedObj = null;
				try{
					// NOTE(review): $J.get is used synchronously here (no await),
					// unlike the awaited $J.get in executeJOEAiPrompt. If it ever
					// returns a Promise, the typeof check below still passes but
					// the field walk finds nothing — confirm $J.get is sync.
					scopedObj = $J.get(convo.scope_id, convo.scope_itemtype) || $J.get(convo.scope_id);
				}catch(_e){}
				if (scopedObj && typeof scopedObj === 'object') {
					// Collect every uploader-style file entry carrying an
					// openai_file_id, from any array-valued field on the object.
					var ids = [];
					var meta = [];
					Object.keys(scopedObj).forEach(function(field){
						var val = scopedObj[field];
						if (!Array.isArray(val)) { return; }
						val.forEach(function(f){
							if (f && f.openai_file_id) {
								ids.push(f.openai_file_id);
								meta.push({
									itemtype: scopedObj.itemtype || convo.scope_itemtype,
									field: field,
									name: f.filename || '',
									role: f.file_role || null,
									openai_file_id: f.openai_file_id
								});
							}
						});
					});
					if (ids.length) {
						// Cache on the conversation so subsequent turns skip the walk.
						convo.attached_openai_file_ids = ids;
						convo.attached_files_meta = meta;
					}
				}
			}
		}catch(_e){ /* non-fatal */ }

		convo.messages = normalizeMessages(convo.messages);

		// On the very first turn of an object-scoped widget conversation,
		// pre-load a slimmed understandObject snapshot so the assistant
		// immediately knows which record "this client/task/..." refers to
		// without having to remember to call MCP. We keep this snapshot
		// concise via slimUnderstandObjectResult and only inject it once.
		try{
			var isObjectChat = (convo.source === 'object_chat') && convo.scope_id;
			var hasMessages = Array.isArray(convo.messages) && convo.messages.length > 0;
			if (isObjectChat && !hasMessages){
				const uo = await callMCPTool('understandObject', {
					_id: convo.scope_id,
					itemtype: convo.scope_itemtype || undefined,
					depth: 1,
					slim: true
				}, { req });
				const slimmed = slimUnderstandObjectResult(uo);
				if (slimmed) {
					// Injected as a system message so it is persisted with the
					// conversation and replayed on later turns.
					convo.messages = convo.messages || [];
					convo.messages.push({
						role: 'system',
						content: JSON.stringify({
							tool: 'understandObject',
							scope_object: slimmed
						})
					});
				}
			}
		}catch(_e){
			console.warn('[chatgpt] widgetMessage understandObject preload failed', _e && _e.message || _e);
		}

		const nowIso = new Date().toISOString();

		// Append user message
		const userMsg = { role: role, content: content, created_at: nowIso };
		convo.messages.push(userMsg);

		// Backfill user metadata (id/name/color) on older conversations that
		// were created before we started storing these fields. Prefer explicit
		// body fields only; we no longer infer from req.User so that widget
		// calls always have a single, explicit user source.
		var u = null;
		if (body.user_id || body.user_name || body.user_color) {
			u = {
				_id: body.user_id,
				name: body.user_name,
				fullname: body.user_name,
				color: body.user_color
			};
		}
		if (u) {
			// Only fill fields that are still missing — never overwrite.
			if (!convo.user && u._id) {
				convo.user = u._id;
			}
			if (!convo.user_name && (u.fullname || u.name)) {
				convo.user_name = u.fullname || u.name;
			}
			if (!convo.user_color && u.color) {
				convo.user_color = u.color;
			}
		}

		const assistantId = body.assistant_id || convo.assistant_id || null;
		// NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
		// We do NOT pass assistant_id to the Responses API (it is not supported in the
		// version we are using); instead we look up the JOE ai_assistant by assistant_id
		// and inject its configuration (model, instructions, tools) into the request.
		var assistantObj = null;
		if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
			assistantObj = JOE.Data.ai_assistant.find(function (a) {
				return a && a.assistant_id === assistantId;
			}) || null;
		}
		const openai = newClient();
		// Model precedence: assistant config > stored convo model > request body.
		const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";

		// Prefer explicit system text on the conversation, then assistant instructions.
		const baseSystemText = (convo.system && String(convo.system)) ||
			(assistantObj && assistantObj.instructions) ||
			"";

		// When this conversation was launched from an object ("Start Chat"
		// on a record), include a small scope hint so the assistant knows
		// which object id/itemtype to use with MCP tools like
		// understandObject/search. We keep this concise to avoid
		// unnecessary tokens but still make the scope unambiguous.
		let systemText = baseSystemText;
		try{
			if (convo.source === 'object_chat' && convo.scope_id) {
				const scopeLine = '\n\n---\nJOE scope_object:\n'
					+ '- itemtype: ' + String(convo.scope_itemtype || 'unknown') + '\n'
					+ '- _id: ' + String(convo.scope_id) + '\n'
					+ 'When you need this object\'s details, call the MCP tool "understandObject" '
					+ 'with these identifiers, or search for related records using the MCP search tools.\n';
				systemText = (baseSystemText || '') + scopeLine;
			}
		}catch(_e){ /* non-fatal */ }

		// Build the messages array for the model. We deliberately separate
		// the stored `convo.messages` from the model-facing payload so we
		// can annotate the latest user turn with uploaded_files metadata
		// without altering the persisted history.
		const messagesForModel = convo.messages.map(function (m) {
			return { role: m.role, content: m.content };
		});
		// If we have attached file metadata, wrap the latest user turn in a
		// small JSON envelope so the model can see which files exist and how
		// they are labeled (role, name, origin field) while still receiving
		// the raw user input as `input`.
		try{
			if (convo.attached_files_meta && convo.attached_files_meta.length && messagesForModel.length) {
				var lastMsg = messagesForModel[messagesForModel.length - 1];
				// Only wrap when the final entry really is this turn's user message.
				if (lastMsg && lastMsg.role === role && typeof lastMsg.content === 'string') {
					lastMsg.content = JSON.stringify({
						uploaded_files: convo.attached_files_meta,
						input: lastMsg.content
					}, null, 2);
				}
			}
		}catch(_e){ /* non-fatal */ }

		// Collect OpenAI file ids from scoped object attachments and any
		// assistant-level files so they are available to the model via the
		// shared attachFilesToResponsesPayload helper inside runWithTools.
		var openaiFileIds = [];
		if (Array.isArray(convo.attached_openai_file_ids) && convo.attached_openai_file_ids.length){
			openaiFileIds = openaiFileIds.concat(convo.attached_openai_file_ids);
		}
		try{
			if (assistantObj && Array.isArray(assistantObj.assistant_files)) {
				assistantObj.assistant_files.forEach(function(f){
					if (f && f.openai_file_id) {
						openaiFileIds.push(f.openai_file_id);
					}
				});
			}
		}catch(_e){}

		// Use runWithTools so that, when an assistant has tools configured,
		// we let the model call those tools via MCP / function tools before
		// generating a final response. Attach any discovered OpenAI files
		// so the model can read from them as needed.
		const runResult = await runWithTools({
			openai: openai,
			model: model,
			systemText: systemText,
			messages: messagesForModel,
			assistant: assistantObj,
			attachments_mode: (body.attachments_mode || 'direct'),
			openai_file_ids: openaiFileIds.length ? openaiFileIds : null,
			req: req
		});

		// If tools were called this turn, inject a small meta message so the
		// widget clearly shows which functions ran before the assistant reply.
		if (runResult.toolCalls && runResult.toolCalls.length) {
			const names = runResult.toolCalls.map(function (tc) { return tc && tc.name; })
				.filter(Boolean)
				.join(', ');
			convo.messages.push({
				role: "assistant",
				meta: "tools_used",
				content: "[Tools used this turn: " + names + "]",
				created_at: nowIso
			});
		}

		// Append the assistant's final text and bump conversation timestamps.
		const assistantText = runResult.finalText || "";
		const assistantMsg = {
			role: "assistant",
			content: assistantText,
			created_at: new Date().toISOString()
		};
		convo.messages.push(assistantMsg);
		convo.last_message_at = assistantMsg.created_at;
		convo.joeUpdated = assistantMsg.created_at;

		await new Promise(function (resolve, reject) {
			// Skip history for widget conversations to avoid heavy diffs / craydent.equals issues.
			JOE.Storage.save(convo, "ai_widget_conversation", function (err, saved) {
				if (err) return reject(err);
				resolve(saved);
			}, { history: false });
		});

		return {
			success: true,
			conversation_id: convo._id,
			model: model,
			assistant_id: assistantId,
			assistant_color: (assistantObj && assistantObj.assistant_color) || convo.assistant_color || null,
			user_color: convo.user_color || ((u && u.color) || null),
			messages: convo.messages,
			last_message: assistantMsg,
			// Usage comes from the underlying Responses call inside runWithTools.
			usage: (runResult.response && runResult.response.usage) || {}
		};
	} catch (e) {
		console.error("[chatgpt] widgetMessage error:", e);
		return { success: false, error: e && e.message || "Unknown error" };
	}
};
1848
+
1849
+ // Mark async plugin methods so Server.pluginHandling will await them.
1850
+ this.async = {
1851
+ executeJOEAiPrompt: this.executeJOEAiPrompt,
1852
+ testPrompt: this.testPrompt,
1853
+ sendInitialConsultTranscript: this.sendInitialConsultTranscript,
1854
+ widgetStart: this.widgetStart,
1855
+ widgetHistory: this.widgetHistory,
1856
+ widgetMessage: this.widgetMessage,
1857
+ autofill: this.autofill,
1858
+ filesRetryFromUrl: this.filesRetryFromUrl
1859
+ };
1860
+ this.protected = [,'testPrompt'];
1861
+ return self;
1862
+ }
1863
+
1864
+ module.exports = new ChatGPT();