json-object-editor 0.10.625 → 0.10.632
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/_www/ai-widget-test.html +367 -0
- package/_www/mcp-test.html +10 -1
- package/css/joe-styles.css +11 -3
- package/css/joe.css +12 -4
- package/css/joe.min.css +1 -1
- package/docs/joe_agent_custom_gpt_instructions_v_3.md +9 -0
- package/dummy +10 -0
- package/img/svgs/ai_assistant.svg +1 -0
- package/img/svgs/ai_assistant_white.svg +1 -0
- package/js/JsonObjectEditor.jquery.craydent.js +34 -3
- package/js/joe-ai.js +784 -52
- package/js/joe.js +52 -21
- package/js/joe.min.js +1 -1
- package/package.json +1 -1
- package/readme.md +8 -1
- package/server/apps/aihub.js +97 -0
- package/server/fields/core.js +4 -1
- package/server/modules/MCP.js +233 -2
- package/server/modules/Server.js +1 -46
- package/server/plugins/auth.js +34 -30
- package/server/plugins/chatgpt-assistants.js +70 -35
- package/server/plugins/chatgpt.js +560 -44
- package/server/schemas/ai_assistant.js +149 -1
- package/server/schemas/ai_conversation.js +14 -1
- package/server/schemas/ai_widget_conversation.js +133 -14
- package/server/schemas/project.js +27 -3
- package/server/schemas/task.js +1 -0
package/server/plugins/chatgpt.js:

```diff
@@ -1,6 +1,7 @@
 const OpenAI = require("openai");
 const { google } = require('googleapis');
 const path = require('path');
+const MCP = require("../modules/MCP.js");
 // const { name } = require("json-object-editor/server/webconfig");
 
 function ChatGPT() {
```
```diff
@@ -55,6 +56,349 @@ function ChatGPT() {
     const summary = JOE.Schemas && JOE.Schemas.summary && JOE.Schemas.summary[name];
     return { full, summary };
   }
+  /**
+   * callMCPTool
+   *
+   * Small, well-scoped helper to invoke a JOE MCP tool directly in-process,
+   * without going over HTTP or worrying about POST size limits.
+   *
+   * Usage:
+   *   const result = await callMCPTool('listSchemas', {}, { req });
+   *
+   * Notes:
+   * - `toolName` must exist on MCP.tools.
+   * - `params` should be a plain JSON-serializable object.
+   * - `ctx` is optional and can pass `{ req }` or other context that MCP
+   *   tools might want (for auth, user, etc.).
+   */
+  async function callMCPTool(toolName, params = {}, ctx = {}) {
+    if (!MCP || !MCP.tools) {
+      throw new Error("MCP module not initialized; cannot call MCP tool");
+    }
+    if (!toolName || typeof toolName !== 'string') {
+      throw new Error("Missing or invalid MCP tool name");
+    }
+    const fn = MCP.tools[toolName];
+    if (typeof fn !== 'function') {
+      throw new Error(`MCP tool "${toolName}" not found`);
+    }
+    try {
+      // All MCP tools accept (params, ctx) and return a JSON-serializable result.
+      // The Responses / tools API often returns arguments as a JSON string, so
+      // normalize that here before invoking the tool.
+      let toolParams = params;
+      if (typeof toolParams === 'string') {
+        try {
+          toolParams = JSON.parse(toolParams);
+        } catch (parseErr) {
+          console.error(`[chatgpt] Failed to JSON-parse tool arguments for "${toolName}"`, parseErr, toolParams);
+          // Fall back to passing the raw string so tools that expect it still work.
+        }
+      }
+      const result = await fn(toolParams || {}, ctx || {});
+      return result;
+    } catch (e) {
+      // Surface a clean error upstream but keep details in logs.
+      console.error(`[chatgpt] MCP tool "${toolName}" error:`, e);
+      throw new Error(`MCP tool "${toolName}" failed: ${e && e.message || 'Unknown error'}`);
+    }
+  }
+
+  /**
+   * extractToolCalls
+   *
+   * Best-effort parser for tool calls from a Responses API result.
+   * The Responses output shape may evolve; this function looks for
+   * any "function_call" typed content in response.output[*].content[*]
+   * and normalizes it into `{ name, arguments }` objects.
+   */
+  function extractToolCalls(response) {
+    var calls = [];
+    if (!response || !Array.isArray(response.output)) { return calls; }
+
+    response.output.forEach(function (item) {
+      if (!item) { return; }
+      // v1-style: item.type === 'function_call'
+      if (item.type === 'function_call') {
+        calls.push({
+          name: item.name || item.function_name,
+          arguments: item.arguments || item.function_arguments || {}
+        });
+      }
+      // message-style: item.content is an array of parts
+      if (Array.isArray(item.content)) {
+        item.content.forEach(function (part) {
+          if (!part) { return; }
+          if (part.type === 'function_call') {
+            calls.push({
+              name: part.name || part.tool_name,
+              arguments: part.arguments || part.args || {}
+            });
+          }
+        });
+      }
+    });
+
+    return calls;
+  }
+
+  // Detect "request too large / token limit" style errors from the Responses API.
+  function isTokenLimitError(err) {
+    if (!err || typeof err !== 'object') return false;
+    if (err.status !== 429 && err.status !== 400) return false;
+    const msg = (err.error && err.error.message) || err.message || '';
+    if (!msg) return false;
+    const lower = String(msg).toLowerCase();
+    // Cover common phrasing from OpenAI for context/TPM limits.
+    return (
+      lower.includes('request too large') ||
+      lower.includes('too many tokens') ||
+      lower.includes('max tokens') ||
+      lower.includes('maximum context length') ||
+      lower.includes('tokens per min')
+    );
+  }
+
+  // Create a compact representation of a JOE object for use in slim payloads.
+  function slimJOEObject(item) {
+    if (!item || typeof item !== 'object') return item;
+    const name = item.name || item.title || item.label || item.email || item.slug || item._id || '';
+    const info = item.info || item.description || item.summary || '';
+    return {
+      _id: item._id,
+      itemtype: item.itemtype,
+      name: name,
+      info: info
+    };
+  }
+
+  // Given an `understandObject` result, produce a slimmed version:
+  // - keep `object` as-is
+  // - keep `flattened` for the main object (depth-limited) if present
+  // - replace each related entry with { field, _id, itemtype, object:{_id,itemtype,name,info} }
+  // - preserve `schemas`, `tags`, `statuses`, and mark `slim:true`
+  function slimUnderstandObjectResult(result) {
+    if (!result || typeof result !== 'object') return result;
+    const out = {
+      _id: result._id,
+      itemtype: result.itemtype,
+      object: result.object,
+      // retain main flattened view if available; this is typically much smaller
+      flattened: result.flattened || null,
+      schemas: result.schemas || {},
+      tags: result.tags || {},
+      statuses: result.statuses || {},
+      slim: true
+    };
+    if (Array.isArray(result.related)) {
+      out.related = result.related.map(function (rel) {
+        if (!rel) return rel;
+        const base = rel.object || {};
+        const slim = slimJOEObject(base);
+        return {
+          field: rel.field,
+          _id: slim && slim._id || rel._id,
+          itemtype: slim && slim.itemtype || rel.itemtype,
+          object: slim
+        };
+      });
+    } else {
+      out.related = [];
+    }
+    return out;
+  }
+
+  // Walk the messages array and, for any system message containing a JSON payload
+  // of the form { "tool": "understandObject", "result": {...} }, replace the
+  // result with a slimmed version to reduce token count. Returns a new array; if
+  // nothing was changed, returns the original array.
+  function shrinkUnderstandObjectMessagesForTokens(messages) {
+    if (!Array.isArray(messages)) return messages;
+    let changed = false;
+    const shrunk = messages.map(function (msg) {
+      if (!msg || msg.role !== 'system') return msg;
+      if (typeof msg.content !== 'string') return msg;
+      try {
+        const parsed = JSON.parse(msg.content);
+        if (!parsed || parsed.tool !== 'understandObject' || !parsed.result) {
+          return msg;
+        }
+        const slimmed = slimUnderstandObjectResult(parsed.result);
+        changed = true;
+        return {
+          ...msg,
+          content: JSON.stringify({ tool: 'understandObject', result: slimmed })
+        };
+      } catch (_e) {
+        return msg;
+      }
+    });
+    return changed ? shrunk : messages;
+  }
+
+  /**
+   * runWithTools
+   *
+   * Single orchestration function for calling the OpenAI Responses API
+   * with optional tools (sourced from a JOE `ai_assistant`), handling
+   * tool calls via MCP, and issuing a follow-up model call with the
+   * tool results injected.
+   *
+   * Inputs (opts):
+   * - openai: OpenAI client instance
+   * - model: model name to use (e.g. "gpt-4.1-mini", "gpt-5.1")
+   * - systemText: string of system / instructions text
+   * - messages: array of { role, content } for the conversation so far
+   * - assistant: JOE `ai_assistant` object (may contain `tools`)
+   * - req: Express request (passed into MCP tools as context)
+   *
+   * Returns:
+   * - { response, finalText, messages, toolCalls }
+   *   where `finalText` is the assistant-facing text (from output_text)
+   *   and `messages` is the possibly-extended message list including
+   *   any synthetic `tool` messages.
+   */
+  async function runWithTools(opts) {
+    const openai = opts.openai;
+    const model = opts.model;
+    const systemText = opts.systemText || "";
+    const messages = Array.isArray(opts.messages) ? opts.messages.slice() : [];
+    const assistant = opts.assistant || null;
+    const req = opts.req;
+
+    // Normalize tools: in many schemas tools may be stored as a JSON string;
+    // here we accept either an array or a JSON-stringified array.
+    let tools = null;
+    if (assistant && assistant.tools) {
+      if (Array.isArray(assistant.tools)) {
+        tools = assistant.tools;
+      } else if (typeof assistant.tools === 'string') {
+        try {
+          const parsed = JSON.parse(assistant.tools);
+          if (Array.isArray(parsed)) {
+            tools = parsed;
+          }
+        } catch (e) {
+          console.error('[chatgpt] Failed to parse assistant.tools JSON', e);
+        }
+      }
+    }
+    // Normalize tool definitions for the Responses API. The assistant UI
+    // uses the Assistants-style shape ({ type:'function', function:{...} }),
+    // but Responses expects the name/description/parameters at the top level:
+    // { type:'function', name:'x', description:'...', parameters:{...} }
+    if (Array.isArray(tools)) {
+      tools = tools.map(function (t) {
+        if (t && t.type === 'function' && t.function && !t.name) {
+          const fn = t.function || {};
+          return {
+            type: 'function',
+            name: fn.name,
+            description: fn.description,
+            parameters: fn.parameters || {}
+          };
+        }
+        return t;
+      });
+    }
+
+    // No tools configured – do a simple single Responses call.
+    if (!tools) {
+      const resp = await openai.responses.create({
+        model: model,
+        instructions: systemText,
+        input: messages
+      });
+      return {
+        response: resp,
+        finalText: resp.output_text || "",
+        messages: messages,
+        toolCalls: []
+      };
+    }
+
+    // Step 1: call the model with tools enabled.
+    const first = await openai.responses.create({
+      model: model,
+      instructions: systemText,
+      input: messages,
+      tools: tools,
+      tool_choice: "auto"
+    });
+
+    const toolCalls = extractToolCalls(first);
+
+    // If the model didn't decide to use tools, just return the first answer.
+    if (!toolCalls.length) {
+      return {
+        response: first,
+        finalText: first.output_text || "",
+        messages: messages,
+        toolCalls: []
+      };
+    }
+
+    // Step 2: execute each tool call via MCP and append tool results.
+    for (let i = 0; i < toolCalls.length; i++) {
+      const tc = toolCalls[i];
+      try {
+        const result = await callMCPTool(tc.name, tc.arguments || {}, { req });
+        messages.push({
+          // Responses API does not support a "tool" role in messages.
+          // We inject tool outputs as a synthetic system message so
+          // the model can see the results without affecting the
+          // user/assistant turn structure.
+          role: "system",
+          content: JSON.stringify({ tool: tc.name, result: result })
+        });
+      } catch (e) {
+        console.error("[chatgpt] MCP tool error in runWithTools:", e);
+        messages.push({
+          role: "system",
+          content: JSON.stringify({
+            tool: tc.name,
+            error: e && e.message || "Tool execution failed"
+          })
+        });
+      }
+    }
+
+    // Step 3: ask the model again with tool outputs included.
+    let finalMessages = messages;
+    let second;
+    try {
+      second = await openai.responses.create({
+        model: model,
+        instructions: systemText,
+        input: finalMessages
+      });
+    } catch (e) {
+      if (isTokenLimitError(e)) {
+        console.warn("[chatgpt] Responses token limit hit; shrinking understandObject payloads and retrying once");
+        const shrunk = shrinkUnderstandObjectMessagesForTokens(finalMessages);
+        // If nothing was shrunk, just rethrow the original error.
+        if (shrunk === finalMessages) {
+          throw e;
+        }
+        finalMessages = shrunk;
+        // Retry once with the smaller payload; let any error bubble up.
+        second = await openai.responses.create({
+          model: model,
+          instructions: systemText,
+          input: finalMessages
+        });
+      } else {
+        throw e;
+      }
+    }
+
+    return {
+      response: second,
+      finalText: second.output_text || "",
+      messages: finalMessages,
+      toolCalls: toolCalls
+    };
+  }
 
 // function newClient(){
 // var key = getAPIKey();
```
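Taken together, the helpers above give the plugin a two-pass tool loop: one Responses call with tools enabled, MCP execution of any tool calls, then a follow-up call with the results injected. A minimal sketch of how `runWithTools` is driven; the `ai_assistant` record and `listSchemas` tool below are hypothetical (only fields the function actually reads are shown), while `newClient()` and `req` come from the surrounding plugin:

```js
// Hypothetical ai_assistant record; `tools` uses the Assistants-style shape
// that the normalizer inside runWithTools converts for the Responses API.
const assistant = {
  ai_model: "gpt-4.1-mini",
  instructions: "You are a JOE data assistant.",
  tools: [{
    type: "function",
    function: {
      name: "listSchemas",               // must match a function on MCP.tools
      description: "List available JOE schemas",
      parameters: { type: "object", properties: {} }
    }
  }]
};

// Inside an async handler, with an OpenAI client and Express req in scope:
const run = await runWithTools({
  openai: newClient(),
  model: assistant.ai_model,
  systemText: assistant.instructions,
  messages: [{ role: "user", content: "What schemas exist?" }],
  assistant: assistant,
  req: req
});
// run.finalText is the assistant-facing answer; run.toolCalls lists any
// MCP tools that were executed between the two Responses calls.
```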
```diff
@@ -235,6 +579,35 @@ function ChatGPT() {
     }
   }
 
+  // Normalize model output that should contain JSON. Models often wrap JSON
+  // in markdown fences (```json ... ```); this helper strips fences and
+  // attempts to isolate the first well-formed JSON object substring.
+  function extractJsonText(raw) {
+    if (!raw) { return ''; }
+    let t = String(raw).trim();
+    // Strip leading ```... fence
+    if (t.startsWith('```')) {
+      const firstNewline = t.indexOf('\n');
+      if (firstNewline !== -1) {
+        t = t.substring(firstNewline + 1);
+      }
+      const lastFence = t.lastIndexOf('```');
+      if (lastFence !== -1) {
+        t = t.substring(0, lastFence);
+      }
+      t = t.trim();
+    }
+    // If there's extra prose around the JSON, slice from first { to last }
+    if (t[0] !== '{') {
+      const firstBrace = t.indexOf('{');
+      const lastBrace = t.lastIndexOf('}');
+      if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) {
+        t = t.slice(firstBrace, lastBrace + 1);
+      }
+    }
+    return t.trim();
+  }
+
   // Autofill feature (Responses API; supports assistant_id or model)
   this.autofill = async function (data, req, res) {
     const startedAt = Date.now();
```
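A quick illustration of what `extractJsonText` tolerates (the outputs here are made up, traced through the logic above):

```js
extractJsonText('```json\n{"patch":{"name":"Acme"}}\n```');
// -> '{"patch":{"name":"Acme"}}'   (markdown fence stripped)

extractJsonText('Here is the patch: {"patch":{"qty":2}} Hope that helps!');
// -> '{"patch":{"qty":2}}'         (surrounding prose sliced off at the braces)
```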
```diff
@@ -285,22 +658,11 @@ function ChatGPT() {
     const openai = newClient();
     const model = body.model || 'gpt-4o-mini';
 
+    // For simplicity and robustness, use plain text output and instruct the
+    // model to return a strict JSON object. We previously attempted the
+    // Responses `json_schema` response_format, but the SDK shape can change
+    // and is harder to parse reliably; text + JSON.parse is sufficient here.
     const requestBase = {
-      response_format: {
-        type: 'json_schema',
-        json_schema: {
-          name: 'joe_autofill_patch',
-          strict: true,
-          schema: {
-            type: 'object',
-            additionalProperties: false,
-            required: ['patch'],
-            properties: {
-              patch: { type: 'object', additionalProperties: true }
-            }
-          }
-        }
-      },
       temperature: 0.2,
       instructions: systemText,
       input: userInput
```
```diff
@@ -315,6 +677,7 @@ function ChatGPT() {
 
     let textOut = '';
     try { textOut = response.output_text || ''; } catch (_e) {}
+    coloredLog("textOut: "+textOut);
     if (!textOut && response && Array.isArray(response.output)) {
       for (let i = 0; i < response.output.length; i++) {
         const item = response.output[i];
```
```diff
@@ -330,10 +693,13 @@ function ChatGPT() {
 
     let patch = {};
     try {
-      const
+      const jsonText = extractJsonText(textOut);
+      const parsed = JSON.parse(jsonText || '{}');
       patch = parsed.patch || {};
-    } catch (_e) {
-
+    } catch (_e) {
+      console.warn('[chatgpt.autofill] Failed to parse JSON patch from model output', _e);
+    }
+    coloredLog("patch: "+JSON.stringify(patch));
     const filteredPatch = {};
     fields.forEach(function (f) {
       if (Object.prototype.hasOwnProperty.call(patch, f)) {
```
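The whitelist step that begins above only copies keys the caller actually requested; the hunk truncates the loop body, which presumably assigns matches through. An illustrative sketch with made-up values, under that assumption:

```js
// Assumed completion of the loop shown above (the hunk cuts off its body).
const patch  = { name: "Acme", rating: 5, _id: "should-not-pass" };
const fields = ["name", "rating"];          // fields the client asked to fill
const filteredPatch = {};
fields.forEach(function (f) {
  if (Object.prototype.hasOwnProperty.call(patch, f)) {
    filteredPatch[f] = patch[f];            // assumption: copy through as-is
  }
});
// filteredPatch -> { name: "Acme", rating: 5 }; unrequested keys like _id are dropped.
```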
```diff
@@ -565,19 +931,60 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     });
   }
 
+  /**
+   * widgetStart
+   *
+   * Purpose:
+   *   Create and persist a new `ai_widget_conversation` record for the
+   *   external `<joe-ai-widget>` chat component. This is a lightweight
+   *   conversation record that stores model, assistant, system text and
+   *   messages for the widget.
+   *
+   * Inputs (data):
+   * - model (optional) override model for the widget
+   * - ai_assistant_id (optional) JOE ai_assistant cuid
+   * - system (optional) explicit system text
+   * - source (optional) freeform source tag, defaults to "widget"
+   *
+   * OpenAI calls:
+   * - None. This endpoint only touches storage.
+   *
+   * Output:
+   * - { success, conversation_id, model, assistant_id }
+   *   where assistant_id is the OpenAI assistant_id (if present).
+   */
   this.widgetStart = async function (data, req, res) {
     try {
       var body = data || {};
-
+      // Default to a modern chat model when no assistant/model is provided.
+      // If an assistant is supplied, its ai_model will override this.
+      var model = body.model || "gpt-5.1";
       var assistant = body.ai_assistant_id ? $J.get(body.ai_assistant_id) : null;
       var system = body.system || (assistant && assistant.instructions) || "";
+      // Prefer explicit user fields coming from the client (ai-widget-test page
+      // passes _joe.User fields). Widget endpoints no longer infer from req.User
+      // to keep a single, explicit source of truth.
+      var user = null;
+      if (body.user_id || body.user_name || body.user_color) {
+        user = {
+          _id: body.user_id,
+          name: body.user_name,
+          fullname: body.user_name,
+          color: body.user_color
+        };
+      }
+      var user_color = (body.user_color) || (user && user.color) || null;
 
       var convo = {
         _id: (typeof cuid === 'function') ? cuid() : undefined,
         itemtype: "ai_widget_conversation",
-        model: assistant && assistant.ai_model || model,
+        model: (assistant && assistant.ai_model) || model,
         assistant: assistant && assistant._id,
         assistant_id: assistant && assistant.assistant_id,
+        assistant_color: assistant && assistant.assistant_color,
+        user: user && user._id,
+        user_name: user && (user.fullname || user.name),
+        user_color: user_color,
         system: system,
         messages: [],
         source: body.source || "widget",
@@ -586,17 +993,20 @@ this.executeJOEAiPrompt = async function(data, req, res) {
       };
 
       const saved = await new Promise(function (resolve, reject) {
+        // Widget conversations are lightweight and do not need full history diffs.
         JOE.Storage.save(convo, "ai_widget_conversation", function (err, result) {
          if (err) return reject(err);
          resolve(result);
-        });
+        }, { history: false });
      });
 
      return {
        success: true,
        conversation_id: saved._id,
        model: saved.model,
-        assistant_id: saved.assistant_id || null
+        assistant_id: saved.assistant_id || null,
+        assistant_color: saved.assistant_color || null,
+        user_color: saved.user_color || user_color || null
      };
    } catch (e) {
      console.error("[chatgpt] widgetStart error:", e);
```
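From the widget's side, the start/message round trip implied by these endpoints looks roughly like the sketch below. The `callPlugin` transport and its route are placeholders (the real wiring lives in `js/joe-ai.js` and `_www/ai-widget-test.html`, also touched in this release); the field names match the handlers above:

```js
// Assumed transport: POST to a plugin route. The actual route shape used by
// <joe-ai-widget> is not shown in this diff.
async function callPlugin(plugin, method, data) {
  const res = await fetch(`/joe/plugin/${plugin}/${method}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(data)
  });
  return res.json();
}

const start = await callPlugin("chatgpt", "widgetStart", {
  ai_assistant_id: "cuid-of-ai-assistant",        // optional JOE cuid
  user_id: "u123", user_name: "Ada", user_color: "#e91e63"
});

const turn = await callPlugin("chatgpt", "widgetMessage", {
  conversation_id: start.conversation_id,
  content: "Summarize my open tasks"
});
// turn.last_message.content holds the reply; turn.usage the token counts.
```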
```diff
@@ -604,6 +1014,22 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     }
   };
 
+  /**
+   * widgetHistory
+   *
+   * Purpose:
+   *   Load an existing `ai_widget_conversation` and normalize its
+   *   messages for use by `<joe-ai-widget>` on page load or refresh.
+   *
+   * Inputs (data):
+   * - conversation_id or _id: the widget conversation cuid
+   *
+   * OpenAI calls:
+   * - None. Purely storage + normalization.
+   *
+   * Output:
+   * - { success, conversation_id, model, assistant_id, messages }
+   */
   this.widgetHistory = async function (data, req, res) {
     try {
       var conversation_id = data.conversation_id || data._id;
@@ -626,6 +1052,8 @@ this.executeJOEAiPrompt = async function(data, req, res) {
         conversation_id: convo._id,
         model: convo.model,
         assistant_id: convo.assistant_id || null,
+        assistant_color: convo.assistant_color || null,
+        user_color: convo.user_color || null,
         messages: convo.messages
       };
     } catch (e) {
@@ -634,6 +1062,35 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     }
   };
 
+  /**
+   * widgetMessage
+   *
+   * Purpose:
+   *   Handle a single user turn for `<joe-ai-widget>`:
+   *   - Append the user message to the stored conversation.
+   *   - Call OpenAI Responses (optionally with tools from the selected
+   *     `ai_assistant`, via runWithTools + MCP).
+   *   - Append the assistant reply, persist the conversation, and return
+   *     the full message history plus the latest assistant message.
+   *
+   * Inputs (data):
+   * - conversation_id or _id: cuid of the widget conversation
+   * - content: user text
+   * - role: user role, defaults to "user"
+   * - assistant_id: optional OpenAI assistant_id (used only to
+   *   locate the JOE ai_assistant config)
+   * - model: optional model override
+   *
+   * OpenAI calls:
+   * - responses.create (once if no tools; twice when tools are present):
+   *   * First call may include tools (assistant.tools) and `tool_choice:"auto"`.
+   *   * Any tool calls are executed via MCP and injected as `tool` messages.
+   *   * Second call is plain Responses with updated messages.
+   *
+   * Output:
+   * - { success, conversation_id, model, assistant_id, messages,
+   *     last_message, usage }
+   */
   this.widgetMessage = async function (data, req, res) {
     try {
       var body = data || {};
@@ -662,30 +1119,80 @@ this.executeJOEAiPrompt = async function(data, req, res) {
       const userMsg = { role: role, content: content, created_at: nowIso };
       convo.messages.push(userMsg);
 
-
+      // Backfill user metadata (id/name/color) on older conversations that
+      // were created before we started storing these fields. Prefer explicit
+      // body fields only; we no longer infer from req.User so that widget
+      // calls always have a single, explicit user source.
+      var u = null;
+      if (body.user_id || body.user_name || body.user_color) {
+        u = {
+          _id: body.user_id,
+          name: body.user_name,
+          fullname: body.user_name,
+          color: body.user_color
+        };
+      }
+      if (u) {
+        if (!convo.user && u._id) {
+          convo.user = u._id;
+        }
+        if (!convo.user_name && (u.fullname || u.name)) {
+          convo.user_name = u.fullname || u.name;
+        }
+        if (!convo.user_color && u.color) {
+          convo.user_color = u.color;
+        }
+      }
+
       const assistantId = body.assistant_id || convo.assistant_id || null;
-
+      // NOTE: assistantId here is the OpenAI assistant_id, not the JOE cuid.
+      // We do NOT pass assistant_id to the Responses API (it is not supported in the
+      // version we are using); instead we look up the JOE ai_assistant by assistant_id
+      // and inject its configuration (model, instructions, tools) into the request.
+      var assistantObj = null;
+      if (assistantId && JOE && JOE.Data && Array.isArray(JOE.Data.ai_assistant)) {
+        assistantObj = JOE.Data.ai_assistant.find(function (a) {
+          return a && a.assistant_id === assistantId;
+        }) || null;
+      }
+      const openai = newClient();
+      const model = (assistantObj && assistantObj.ai_model) || convo.model || body.model || "gpt-5.1";
 
-
+      // Prefer explicit system text on the conversation, then assistant instructions.
+      const systemText = (convo.system && String(convo.system)) ||
+        (assistantObj && assistantObj.instructions) ||
+        "";
       const messagesForModel = convo.messages.map(function (m) {
         return { role: m.role, content: m.content };
       });
 
-
-
-
-
-
-
-
-
-
-
-
+      // Use runWithTools so that, when an assistant has tools configured,
+      // we let the model call those tools via MCP before generating a
+      // final response.
+      const runResult = await runWithTools({
+        openai: openai,
+        model: model,
+        systemText: systemText,
+        messages: messagesForModel,
+        assistant: assistantObj,
+        req: req
+      });
+
+      // If tools were called this turn, inject a small meta message so the
+      // widget clearly shows which functions ran before the assistant reply.
+      if (runResult.toolCalls && runResult.toolCalls.length) {
+        const names = runResult.toolCalls.map(function (tc) { return tc && tc.name; })
+          .filter(Boolean)
+          .join(', ');
+        convo.messages.push({
+          role: "assistant",
+          meta: "tools_used",
+          content: "[Tools used this turn: " + names + "]",
+          created_at: nowIso
        });
      }
 
-      const assistantText =
+      const assistantText = runResult.finalText || "";
       const assistantMsg = {
         role: "assistant",
         content: assistantText,
```
```diff
@@ -696,10 +1203,11 @@ this.executeJOEAiPrompt = async function(data, req, res) {
       convo.joeUpdated = assistantMsg.created_at;
 
       await new Promise(function (resolve, reject) {
+        // Skip history for widget conversations to avoid heavy diffs / craydent.equals issues.
         JOE.Storage.save(convo, "ai_widget_conversation", function (err, saved) {
           if (err) return reject(err);
           resolve(saved);
-        });
+        }, { history: false });
       });
 
       return {
@@ -707,9 +1215,12 @@ this.executeJOEAiPrompt = async function(data, req, res) {
         conversation_id: convo._id,
         model: model,
         assistant_id: assistantId,
+        assistant_color: (assistantObj && assistantObj.assistant_color) || convo.assistant_color || null,
+        user_color: convo.user_color || ((u && u.color) || null),
         messages: convo.messages,
         last_message: assistantMsg,
-
+        // Usage comes from the underlying Responses call inside runWithTools.
+        usage: (runResult.response && runResult.response.usage) || {}
       };
     } catch (e) {
       console.error("[chatgpt] widgetMessage error:", e);
@@ -717,11 +1228,16 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     }
   };
 
-
-
-
-
-
+  // Mark async plugin methods so Server.pluginHandling will await them.
+  this.async = {
+    executeJOEAiPrompt: this.executeJOEAiPrompt,
+    testPrompt: this.testPrompt,
+    sendInitialConsultTranscript: this.sendInitialConsultTranscript,
+    widgetStart: this.widgetStart,
+    widgetHistory: this.widgetHistory,
+    widgetMessage: this.widgetMessage,
+    autofill: this.autofill,
+  };
   this.protected = [,'testPrompt'];
   return self;
 }
```
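The new `this.async` map is a registration convention: the server consults it to decide whether a plugin method returns a promise that must be awaited. `Server.pluginHandling` itself is outside this diff, so the dispatcher below is only a minimal sketch of the convention it implies; the function name and signature are hypothetical:

```js
// Hypothetical dispatcher: await methods the plugin lists in `async`,
// invoke everything else synchronously.
async function dispatchPluginMethod(plugin, method, data, req, res) {
  const fn = plugin[method];
  if (typeof fn !== "function") {
    throw new Error("Unknown plugin method: " + method);
  }
  const result = fn.call(plugin, data, req, res);
  return (plugin.async && plugin.async[method]) ? await result : result;
}
```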