json-object-editor 0.10.650 → 0.10.654

This diff shows the content changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
@@ -392,7 +392,7 @@ ThoughtPipeline.compile = async function compile(pipelineId, scopeId, opts) {
  * @param {string} agentId
  * @param {string} userInput
  * @param {string} scopeId
- * @param {object} ctx - optional context (e.g., { req })
+ * @param {object} ctx - optional context (e.g., { req, model })
  */
 ThoughtPipeline.runAgent = async function runAgent(agentId, userInput, scopeId, ctx) {
   var agent = getAgent(agentId);
@@ -401,8 +401,11 @@ ThoughtPipeline.runAgent = async function runAgent(agentId, userInput, scopeId,
   var apiKey = getAPIKey();
   var openai = new OpenAI({ apiKey: apiKey });

+  var overrideModel = ctx && ctx.model;
+  var modelToUse = overrideModel || agent.model || 'gpt-4.1-mini';
+
   var response = await openai.responses.create({
-    model: agent.model || 'gpt-4.1-mini',
+    model: modelToUse,
     instructions: agent.system_prompt,
     input: JSON.stringify({
       pipeline_id: compiled.pipeline_id,
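
Note: the `modelToUse` resolution above gives a per-run `ctx.model` override precedence over the agent's configured model, with `gpt-4.1-mini` as the final fallback. A minimal caller sketch; the agent id, scope id, and model name here are illustrative, not taken from this diff:

```js
// Hypothetical caller: pin one run to a specific model without editing the agent.
// 'thought_agent', 'scope_123', and 'gpt-4.1' are illustrative values.
const result = await ThoughtPipeline.runAgent(
  'thought_agent',               // agentId
  'Summarize the open thoughts', // userInput
  'scope_123',                   // scopeId
  { req: req, model: 'gpt-4.1' } // ctx: optional per-run model override
);
```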
@@ -440,6 +443,15 @@ ThoughtPipeline.runAgent = async function runAgent(agentId, userInput, scopeId,
     name: agent.name + ' → Thoughts',
     response_type: 'thought_generation',
     response: rawText || '',
+    response_json: (function () {
+      try {
+        var jt = extractJsonText(rawText);
+        if (!jt) return null;
+        return JSON.parse(jt);
+      } catch (_e) {
+        return null;
+      }
+    })(),
     response_id: response.id || '',
     user_prompt: userInput || '',
     model_used: agent.model || 'gpt-4.1-mini',
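
The new `response_json` field depends on `extractJsonText`, whose body is not included in this diff (a later hunk shows only its `return match ? match[1] : null;` tail, suggesting a regex capture). A plausible sketch of such a helper, offered as an assumption rather than the package's actual implementation:

````js
// Plausible sketch (assumption): pull JSON out of a model reply, preferring a
// ```json fenced block and falling back to text that already looks like JSON.
function extractJsonText(text) {
  if (!text) return null;
  var match = String(text).match(/```json\s*([\s\S]*?)```/i);
  if (match) return match[1];
  var trimmed = String(text).trim();
  return (trimmed[0] === '{' || trimmed[0] === '[') ? trimmed : null;
}
````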
@@ -448,6 +460,12 @@ ThoughtPipeline.runAgent = async function runAgent(agentId, userInput, scopeId,
     usage: response.usage || {},
     prompt_method: 'ThoughtPipeline.runAgent'
   };
+  // Persist used OpenAI file ids when provided (audit convenience)
+  try{
+    if (ctx && Array.isArray(ctx.openai_file_ids) && ctx.openai_file_ids.length){
+      aiResponseObj.used_openai_file_ids = ctx.openai_file_ids.slice(0,10);
+    }
+  }catch(_e){}

   var savedResponse = await new Promise(function (resolve, reject) {
     try {
@@ -518,7 +536,9 @@ ThoughtPipeline.runAgent = async function runAgent(agentId, userInput, scopeId,
       created_by: 'agent:' + agent.id
     },
     source_ai_response: savedResponseId,
-    created_by: 'agent:' + agent.id
+    created_by: 'agent:' + agent.id,
+    creator_type: 'agent',
+    creator_id: agent.id
   };
 }).filter(function (obj) {
   return obj.statement && obj.statement.length;
@@ -3,6 +3,7 @@ function AWSConnect(){
   this.default = function(data,req,res){
     // AWS SDK v3 (modular)
     const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3');
+    const chatgpt = require('./chatgpt.js');
     var settings_config = tryEval(JOE.Cache.settings.AWS_S3CONFIG)||{};
     var config = $c.merge(settings_config);

@@ -67,12 +68,41 @@ var response = {
   }

   s3.send(new PutObjectCommand(s3Params))
-    .then(function(data){
+    .then(async function(data){
       // Construct canonical URL from region + bucket
       var region = config.region;
       var url = 'https://'+Bucket+'.s3.'+region+'.amazonaws.com/'+Key;
       response.data = data;
       response.url = url;
+      response.etag = data && (data.ETag || data.ETAG || data.eTag);
+
+      // If OpenAI key is configured, also upload to OpenAI Files (purpose: assistants)
+      try{
+        var hasOpenAIKey = !!JOE.Utils.Settings && !!JOE.Utils.Settings('OPENAI_API_KEY');
+        if(hasOpenAIKey){
+          // Prefer original buffer when provided via base64
+          if(data && typeof data === 'object'){ /* noop to keep linter happy */}
+          if(typeof s3Params.Body !== 'string' && s3Params.Body){
+            var filenameOnly = Key.split('/').pop();
+            var result = await chatgpt.filesUploadFromBufferHelper({
+              buffer: s3Params.Body,
+              filename: filenameOnly,
+              contentType: s3Params.ContentType,
+              purpose: 'assistants'
+            });
+            if(result && result.id){
+              response.openai_file_id = result.id;
+              response.openai_purpose = result.purpose || 'assistants';
+            }
+          }else{
+            // Fallback: if we didn't have a buffer (unlikely with current flow),
+            // skip immediate upload; client can use retry endpoint.
+          }
+        }
+      }catch(e){
+        // Non-fatal: S3 upload already succeeded
+        response.openai_error = (e && e.message) || String(e);
+      }
       res.status(200).send(response);
       console.log("Successfully uploaded data to "+Key);
     })
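
When the inline OpenAI upload fails (or no buffer is available), the code leaves `response.openai_error` on the S3 response and, per its own fallback comment, defers to the `filesRetryFromUrl` endpoint added to chatgpt.js below. A hedged sketch of that retry call from a client; the route is hypothetical, since this diff does not show how JOE mounts plugin methods:

```js
// Hypothetical client-side retry: re-upload an already-stored S3 object to
// OpenAI Files. The '/joe/chatgpt/filesRetryFromUrl' route is illustrative.
const res = await fetch('/joe/chatgpt/filesRetryFromUrl', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    url: uploadResponse.url,       // S3 URL returned by the AWSConnect upload
    filename: 'report.pdf',        // hypothetical original filename
    contentType: 'application/pdf' // optional
  })
});
const out = await res.json();
// -> { success: true, openai_file_id, openai_purpose } or { success: false, error }
```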
@@ -1,6 +1,8 @@
 const OpenAI = require("openai");
 const { google } = require('googleapis');
 const path = require('path');
+const os = require('os');
+const fs = require('fs');
 const MCP = require("../modules/MCP.js");
 // const { name } = require("json-object-editor/server/webconfig");

@@ -413,6 +415,109 @@ function shrinkUnderstandObjectMessagesForTokens(messages) {
 function newClient() {
   return new OpenAI({ apiKey: getAPIKey() });
 }
+
+// Safely call Responses API with optional temperature/top_p.
+// If the model rejects these parameters, strip and retry once.
+async function safeResponsesCreate(openai, payload){
+  try{
+    return await openai.responses.create(payload);
+  }catch(e){
+    try{
+      var msg = (e && (e.error && e.error.message) || e.message || '').toLowerCase();
+      var badTemp = msg.includes("unsupported parameter") && msg.includes("temperature");
+      var badTopP = msg.includes("unsupported parameter") && msg.includes("top_p");
+      var unknownTemp = msg.includes("unknown parameter") && msg.includes("temperature");
+      var unknownTopP = msg.includes("unknown parameter") && msg.includes("top_p");
+      if (badTemp || badTopP || unknownTemp || unknownTopP){
+        var p2 = Object.assign({}, payload);
+        if (p2.hasOwnProperty('temperature')) delete p2.temperature;
+        if (p2.hasOwnProperty('top_p')) delete p2.top_p;
+        console.warn('[chatgpt] Retrying without temperature/top_p due to model rejection');
+        return await openai.responses.create(p2);
+      }
+    }catch(_e){ /* fallthrough */ }
+    throw e;
+  }
+}
+
+// Ensure a vector store exists with the provided file_ids indexed; returns { vectorStoreId }
+async function ensureVectorStoreForFiles(fileIds = []){
+  const openai = newClient();
+  // Create ephemeral store per run (could be optimized to reuse/persist later)
+  const vs = await openai.vectorStores.create({ name: 'JOE Prompt Run '+Date.now() });
+  const storeId = vs.id;
+  // Link files by id
+  for (const fid of (fileIds||[]).slice(0,10)) {
+    try{
+      await openai.vectorStores.files.create(storeId, { file_id: fid });
+    }catch(e){
+      console.warn('[chatgpt] vectorStores.files.create failed for', fid, e && e.message || e);
+    }
+  }
+  // Poll (best-effort) until files are processed or timeout
+  const timeoutMs = 8000;
+  const start = Date.now();
+  try{
+    while(Date.now() - start < timeoutMs){
+      const listed = await openai.vectorStores.files.list(storeId, { limit: 100 });
+      const items = (listed && listed.data) || [];
+      const pending = items.some(f => f.status && f.status !== 'completed');
+      if(!pending){ break; }
+      await new Promise(r => setTimeout(r, 500));
+    }
+  }catch(_e){ /* non-fatal */ }
+  return { vectorStoreId: storeId };
+}
+
+// ---------------- OpenAI Files helpers ----------------
+async function uploadFileFromBuffer(buffer, filename, contentType, purpose) {
+  const openai = newClient();
+  const usePurpose = purpose || 'assistants';
+  const tmpDir = os.tmpdir();
+  const safeName = filename || ('upload_' + Date.now());
+  const tmpPath = path.join(tmpDir, safeName);
+  await fs.promises.writeFile(tmpPath, buffer);
+  try {
+    // openai.files.create accepts a readable stream
+    const fileStream = fs.createReadStream(tmpPath);
+    const created = await openai.files.create({
+      purpose: usePurpose,
+      file: fileStream
+    });
+    return { id: created.id, purpose: usePurpose };
+  } finally {
+    // best-effort cleanup
+    fs.promises.unlink(tmpPath).catch(()=>{});
+  }
+}
+
+// Expose a helper that other plugins can call in-process
+this.filesUploadFromBufferHelper = async function ({ buffer, filename, contentType, purpose }) {
+  if (!buffer || !buffer.length) {
+    throw new Error('Missing buffer');
+  }
+  return await uploadFileFromBuffer(buffer, filename, contentType, purpose || 'assistants');
+};
+
+// Public endpoint to retry OpenAI upload from a URL (e.g., S3 object URL)
+this.filesRetryFromUrl = async function (data, req, res) {
+  try {
+    const { default: got } = await import('got');
+    const url = data && (data.url || data.location);
+    const filename = data && data.filename || (url && url.split('/').pop()) || ('upload_' + Date.now());
+    const contentType = data && data.contentType || undefined;
+    const purpose = 'assistants';
+    if (!url) {
+      return { success: false, error: 'Missing url' };
+    }
+    const resp = await got(url, { responseType: 'buffer' });
+    const buffer = resp.body;
+    const created = await uploadFileFromBuffer(buffer, filename, contentType, purpose);
+    return { success: true, openai_file_id: created.id, openai_purpose: created.purpose };
+  } catch (e) {
+    return { success: false, error: e && e.message || 'Retry upload failed' };
+  }
+};
 this.testPrompt= async function(data, req, res) {
   try {
     var payload = {
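
`safeResponsesCreate` exists because some OpenAI models reject `temperature`/`top_p` on the Responses API; the wrapper strips both from a copy of the payload and retries exactly once when the error message mentions an unsupported or unknown parameter. A minimal usage sketch, with an illustrative model name and prompt:

```js
// Illustrative: callers can always pass sampling params; if the model rejects
// them, the wrapper deletes temperature/top_p and retries once.
const openai = newClient();
const response = await safeResponsesCreate(openai, {
  model: 'o4-mini',                          // hypothetical strict model
  instructions: 'You are a terse assistant.',
  input: 'Say hi.',
  temperature: 0.7                           // dropped on retry if unsupported
});
console.log(response.output_text);
```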
@@ -432,7 +537,40 @@ function shrinkUnderstandObjectMessagesForTokens(messages) {
       model: 'gpt-4o',
     });
     coloredLog(chatCompletion);
-    return {payload,chatCompletion,content:chatCompletion.choices[0].message.content};
+    const text = chatCompletion.choices && chatCompletion.choices[0] && chatCompletion.choices[0].message && chatCompletion.choices[0].message.content || '';
+    // Optionally persist as ai_response with parsed JSON when applicable
+    const parsed = (function(){
+      try {
+        const jt = extractJsonText(text);
+        return jt ? JSON.parse(jt) : null;
+      } catch(_e){ return null; }
+    })();
+    try {
+      var creator_type = null;
+      var creator_id = null;
+      try{
+        var u = req && req.User;
+        if (u && u._id){
+          creator_type = 'user';
+          creator_id = u._id;
+        }
+      }catch(_e){}
+      const aiResponse = {
+        itemtype: 'ai_response',
+        name: 'Test Prompt → ChatGPT',
+        response_type: 'testPrompt',
+        response: text,
+        response_json: parsed,
+        response_id: chatCompletion.id || '',
+        user_prompt: payload && payload.data && payload.data.prompt || 'Tell me a story about JOE: the json object editor in under 256 chars.',
+        model_used: 'gpt-4o',
+        created: (new Date()).toISOString(),
+        creator_type: creator_type,
+        creator_id: creator_id
+      };
+      JOE.Storage.save(aiResponse, 'ai_response', function(){}, { history: false, user: (req && req.User) || { name:'system' } });
+    } catch(_e){ /* best-effort only */ }
+    return {payload,chatCompletion,content:text};
   } catch (error) {
     if (error.status === 429) {
       return { errors: 'You exceeded your current quota, please check your plan and billing details.' };
@@ -498,7 +636,7 @@ function shrinkUnderstandObjectMessagesForTokens(messages) {
       response: chatContent,
       payload,
       prompt_method:req.params.method
-    });
+    }, req && req.User);
     coloredLog("response saved -"+responseName);
     return {payload,
       businessOBJ,
@@ -549,8 +687,16 @@ function shrinkUnderstandObjectMessagesForTokens(messages) {
   return match ? match[1] : null;
 }

-async function saveAIResponse(data) {
+async function saveAIResponse(data, user) {
   try {
+    var creator_type = null;
+    var creator_id = null;
+    try{
+      if (user && user._id){
+        creator_type = 'user';
+        creator_id = user._id;
+      }
+    }catch(_e){}
     const aiResponse = {
       name: data.name,
       itemtype: 'ai_response',
@@ -560,7 +706,9 @@ function shrinkUnderstandObjectMessagesForTokens(messages) {
       payload: data.payload,
       prompt_method:data.prompt_method,
       created: (new Date).toISOString(),
-      _id:cuid()
+      _id:cuid(),
+      creator_type: creator_type,
+      creator_id: creator_id
       // Add any other fields you want to save
     };
     await new Promise((resolve, reject) => {
@@ -796,7 +944,8 @@ this.executeJOEAiPrompt = async function(data, req, res) {
   const referencedObjectIds = []; // Track all objects touched during helper function
   try {
     const promptId = data.ai_prompt;
-    const params = data;
+    // Support both payload shapes: { ai_prompt, params:{...}, ... } and flat
+    const params = (data && (data.params || data)) || {};

     if (!promptId) {
       return { error: "Missing prompt_id." };
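
Both payload shapes now resolve to the same `params` object, so existing flat callers keep working while new callers can use an explicit envelope. Illustrative calls; the prompt id and parameter values are hypothetical:

```js
// Nested shape: explicit params envelope
executeJOEAiPrompt({ ai_prompt: 'prompt_abc', params: { topic: 'roadmap' } }, req, res);

// Flat shape: no envelope, so `params` falls back to the whole data object
executeJOEAiPrompt({ ai_prompt: 'prompt_abc', topic: 'roadmap' }, req, res);
```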
@@ -865,8 +1014,49 @@ this.executeJOEAiPrompt = async function(data, req, res) {
       //return_token_usage: true
       //max_tokens: prompt.max_tokens ?? 1200
     };
-
-    const response = await openai.responses.create(payload);
+    coloredLog(`${payload.model} and ${payload.temperature}`);
+    const mode = (prompt.attachments_mode || 'direct');
+    if (Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
+      if (mode === 'file_search'){
+        // Use file_search tool and attach vector store
+        try{
+          const ensured = await ensureVectorStoreForFiles(data.openai_file_ids);
+          payload.tools = payload.tools || [];
+          if(!payload.tools.find(t => t && t.type === 'file_search')){
+            payload.tools.push({ type:'file_search' });
+          }
+          payload.tool_resources = Object.assign({}, payload.tool_resources, {
+            file_search: { vector_store_ids: [ ensured.vectorStoreId ] }
+          });
+          // Keep input as text only (if any)
+          if (finalInput && String(finalInput).trim().length){
+            payload.input = finalInput;
+          }
+        }catch(e){
+          console.warn('[chatgpt] file_search setup failed; falling back to direct parts', e && e.message || e);
+          // Fall back to direct parts
+          const parts = [];
+          if (finalInput && String(finalInput).trim().length){
+            parts.push({ type:'input_text', text: String(finalInput) });
+          }
+          data.openai_file_ids.slice(0,10).forEach(function(id){
+            parts.push({ type:'input_file', file_id: id });
+          });
+          payload.input = [ { role:'user', content: parts } ];
+        }
+      } else {
+        // Direct context stuffing: input parts
+        const parts = [];
+        if (finalInput && String(finalInput).trim().length){
+          parts.push({ type:'input_text', text: String(finalInput) });
+        }
+        data.openai_file_ids.slice(0,10).forEach(function(id){
+          parts.push({ type:'input_file', file_id: id });
+        });
+        payload.input = [ { role:'user', content: parts } ];
+      }
+    }
+    const response = await safeResponsesCreate(openai, payload);


     // const payload = createResponsePayload(prompt, params, instructions, data.user_prompt);
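
For clarity, these are the two Responses API payload shapes the branch above produces when `openai_file_ids` are present; the file ids, vector store id, and text are illustrative:

```js
// attachments_mode: 'direct': up to 10 files are inlined as input parts
var directPayload = {
  model: 'gpt-4o',
  input: [{
    role: 'user',
    content: [
      { type: 'input_text', text: 'Summarize the attached file.' },
      { type: 'input_file', file_id: 'file-abc123' } // hypothetical id
    ]
  }]
};

// attachments_mode: 'file_search': files go into an ephemeral vector store
// and the file_search tool retrieves relevant chunks instead of inlining
var fileSearchPayload = {
  model: 'gpt-4o',
  input: 'Summarize the attached file.',
  tools: [{ type: 'file_search' }],
  tool_resources: { file_search: { vector_store_ids: ['vs_abc123'] } }
};
```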
@@ -880,8 +1070,18 @@ this.executeJOEAiPrompt = async function(data, req, res) {
       params,
       referenced_object_ids: referencedObjectIds,
       response_id:response.id,
-      usage: response.usage || {}
+      usage: response.usage || {},
+      user: req && req.User,
+      ai_assistant_id: data.ai_assistant_id
     });
+    try{
+      if (saved && Array.isArray(data.openai_file_ids) && data.openai_file_ids.length){
+        saved.used_openai_file_ids = data.openai_file_ids.slice(0,10);
+        await new Promise(function(resolve){
+          JOE.Storage.save(saved,'ai_response',function(){ resolve(); },{ user: req && req.User, history:false });
+        });
+      }
+    }catch(_e){}

     return { success: true, ai_response_id: saved._id,response:response.output_text || "",usage:response.usage };
   } catch (e) {
@@ -903,13 +1103,34 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     max_tokens: prompt.max_tokens ?? 1200
   };
 }
-async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids,response_id,usage}) {
+async function saveAiResponseRefactor({ prompt, ai_response_content, user_prompt, params, referenced_object_ids,response_id,usage,user,ai_assistant_id}) {
   var response_keys = [];
   try {
     response_keys = Object.keys(JSON.parse(ai_response_content));
   }catch (e) {
     console.error('❌ Error parsing AI response content for keys:', e);
   }
+  // Best-effort parse into JSON for downstream agents (Thought pipeline, etc.)
+  let parsedResponse = null;
+  try {
+    const jt = extractJsonText(ai_response_content);
+    if (jt) {
+      parsedResponse = JSON.parse(jt);
+    }
+  } catch(_e) {
+    parsedResponse = null;
+  }
+  var creator_type = null;
+  var creator_id = null;
+  try{
+    if (ai_assistant_id){
+      creator_type = 'ai_assistant';
+      creator_id = ai_assistant_id;
+    } else if (user && user._id){
+      creator_type = 'user';
+      creator_id = user._id;
+    }
+  }catch(_e){}
   const aiResponse = {
     name: `${prompt.name}`,
     itemtype: 'ai_response',
@@ -917,6 +1138,7 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     prompt_name: prompt.name,
     prompt_method:prompt.prompt_method,
     response: ai_response_content,
+    response_json: parsedResponse,
     response_keys: response_keys,
     response_id:response_id||'',
     user_prompt: user_prompt,
@@ -926,7 +1148,9 @@ this.executeJOEAiPrompt = async function(data, req, res) {
     model_used: prompt.ai_model || "gpt-4o",
     referenced_objects: referenced_object_ids, // new flexible array of referenced object ids
     created: (new Date).toISOString(),
-    _id: cuid()
+    _id: cuid(),
+    creator_type: creator_type,
+    creator_id: creator_id
   };

   await new Promise((resolve, reject) => {
@@ -1262,6 +1486,7 @@ this.executeJOEAiPrompt = async function(data, req, res) {
   widgetHistory: this.widgetHistory,
   widgetMessage: this.widgetMessage,
   autofill: this.autofill,
+  filesRetryFromUrl: this.filesRetryFromUrl
 };
 this.protected = [,'testPrompt'];
 return self;
@@ -0,0 +1,90 @@
+var schema = {
+  title: "AI Pipeline | ${name}",
+  display: "AI Pipeline",
+  info: "Configurable AI context pipelines composed of ordered steps, used to compile context for agents like the Thought Engine.",
+  summary: {
+    description: "Declarative definition of AI context pipelines: an ordered list of steps (schema summaries, thoughts, objects, etc.) that feed agents.",
+    purpose: "Use ai_pipeline to configure what context is compiled for a given agent run (e.g., which schemas, thoughts, and objects are included) without changing code.",
+    labelField: "name",
+    defaultSort: { field: "created", dir: "desc" },
+    searchableFields: ["name", "pipeline_id", "info", "_id"],
+    allowedSorts: ["created", "joeUpdated", "name"],
+    relationships: {
+      outbound: [
+        // Future: agents that reference this pipeline_id
+      ],
+      inbound: { graphRef: "server/relationships.graph.json" }
+    },
+    joeManagedFields: ["created", "joeUpdated"],
+    fields: [
+      { name: "_id", type: "string", required: true },
+      { name: "itemtype", type: "string", required: true, const: "ai_pipeline" },
+      { name: "name", type: "string", required: true },
+      { name: "pipeline_id", type: "string", required: true },
+      { name: "info", type: "string" },
+      // Steps are minimal, but expressive enough to mirror current PIPELINES config
+      { name: "steps", type: "objectList" },
+      { name: "joeUpdated", type: "string", format: "date-time" },
+      { name: "created", type: "string", format: "date-time" }
+    ]
+  },
+  listView: {
+    title: function (p) {
+      return `
+        <joe-subtext>${_joe.Utils.prettyPrintDTS(p.created)}</joe-subtext>
+        <joe-title>${p.name}</joe-title>
+        <joe-subtitle>${p.pipeline_id || ""}</joe-subtitle>
+      `;
+    },
+    listWindowTitle: "AI Pipelines"
+  },
+  sorter: ["!created", "name"],
+  fields: [
+    "name",
+    { name: "pipeline_id", width: "50%", comment: "Logical id used by agents and ThoughtPipeline (e.g., thought_default, protocol_planning)." },
+    "info",
+
+    { section_start: "steps", display: "Pipeline Steps", collapsed: false },
+    {
+      name: "steps",
+      type: "objectList",
+      display: "Steps",
+      comment: "Ordered steps that define how context is compiled.",
+      properties: [
+        { name: "id", width: "20%", comment: "Stable step id (e.g., schema_summaries, accepted_thoughts)." },
+        {
+          name: "step_type",
+          type: "select",
+          values: ["schema_summaries", "thoughts", "objects", "scope_object", "text", "tools"],
+          width: "15%",
+          comment: "Determines how this step is executed."
+        },
+        {
+          name: "render_mode",
+          type: "select",
+          values: ["json", "compact_json", "bullets", "text"],
+          width: "15%",
+          comment: "How this step is rendered into the agent prompt."
+        },
+        { name: "required", type: "boolean", width: "10%", comment: "If true, pipeline fails when this step cannot be resolved." },
+        {
+          name: "selector",
+          type: "code",
+          language: "json",
+          width: "40%",
+          comment: "Free-form selector JSON (e.g., { \"names\": [\"thought\",\"ai_prompt\"] } or { \"query\": {\"itemtype\":\"thought\"}, \"sortBy\":\"joeUpdated\" })."
+        }
+      ]
+    },
+    { section_end: "steps" },
+
+    { section_start: "system", collapsed: true },
+    "_id",
+    "created",
+    "itemtype",
+    { section_end: "system" }
+  ],
+  idprop: "_id"
+};
+
+module.exports = schema;
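
For reference, a hypothetical `ai_pipeline` document shaped by the schema above; the ids and selectors are illustrative, echoing the examples in the schema's own comments:

```js
// Hypothetical ai_pipeline object matching the new schema
var examplePipeline = {
  _id: 'cuid_example',
  itemtype: 'ai_pipeline',
  name: 'Thought Engine Default',
  pipeline_id: 'thought_default',
  info: 'Compiles schema summaries and accepted thoughts for the Thought Engine.',
  steps: [
    {
      id: 'schema_summaries',
      step_type: 'schema_summaries',
      render_mode: 'compact_json',
      required: true,
      selector: '{ "names": ["thought", "ai_prompt"] }'
    },
    {
      id: 'accepted_thoughts',
      step_type: 'thoughts',
      render_mode: 'bullets',
      required: false,
      selector: '{ "query": { "itemtype": "thought" }, "sortBy": "joeUpdated" }'
    }
  ],
  created: new Date().toISOString()
};
```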
@@ -29,6 +29,7 @@ var schema = {
     { name:'instructions_format', type:'string' },
     { name:'instructions', type:'string' },
     { name:'user_prompt', type:'string' },
+    { name:'attachments_mode', type:'string', display:'Attachments Mode', enumValues:['direct','file_search'], default:'direct' },
     { name:'status', type:'string', isReference:true, targetSchema:'status' },
     { name:'tags', type:'string', isArray:true, isReference:true, targetSchema:'tag' },
     { name:'ai_model', type:'string' },
@@ -162,6 +163,7 @@ var schema = {
     {section_end:'workflow'},
     {section_start:'openAi',collapsed:true},
     'ai_model',
+    {name:'attachments_mode', type:'select', display:'Attachments Mode', values:['direct','file_search'], default:'direct', comment:'direct = include files in prompt as context; file_search = index files and retrieve relevant chunks'},
     {name:'temperature', type:'number',display:'Temperature', default:.7, step:"0.1",comment:'0-1, 0 is deterministic, 1 is random'},
     //{name:'max_tokens', type:'number',display:'Max Tokens',comment:'max tokens to return',default:4096},
     {section_end:'openAi'},
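
Taken together, the new `attachments_mode` field on the ai_prompt schema drives the branch in `executeJOEAiPrompt`: `direct` inlines file parts into the prompt input, while `file_search` indexes the files into an ephemeral vector store for retrieval. A hypothetical end-to-end invocation; the prompt id and file ids are illustrative:

```js
// Hypothetical run of a prompt whose ai_prompt record sets
// attachments_mode: 'file_search', reusing previously uploaded file ids.
const result = await chatgpt.executeJOEAiPrompt({
  ai_prompt: 'prompt_abc',                         // illustrative prompt _id
  openai_file_ids: ['file-abc123', 'file-def456'],
  params: { topic: 'quarterly report' }
}, req, res);
// -> { success: true, ai_response_id, response, usage }
```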