@aj-archipelago/cortex 1.4.19 → 1.4.20

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.4.19",
+  "version": "1.4.20",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
@@ -77,9 +77,9 @@ export default {
   enableDuplicateRequests: false,
   timeout: 600,
   geminiSafetySettings: [
-    {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
+    {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_NONE'},
     {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
-    {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
-    {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}
+    {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_NONE'},
+    {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE'}
   ],
 }
@@ -88,10 +88,10 @@ export default {
   enableDuplicateRequests: false,
   timeout: 600,
   geminiSafetySettings: [
-    {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
+    {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_NONE'},
     {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
-    {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
-    {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}
+    {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_NONE'},
+    {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE'}
   ],
 }
 
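These entries have the shape of the Gemini API's `safetySettings` request field, where `BLOCK_NONE` disables filtering for a category and `BLOCK_ONLY_HIGH` blocks only content the API rates as high-probability harm, so this release loosens three of the four categories. For reference, a minimal sketch of how settings of this shape are passed through Google's `@google/generative-ai` SDK (the model name is a placeholder; Cortex's internal wiring may differ):

```js
// Sketch only: illustrates the category/threshold semantics, not Cortex's
// internal plumbing. Model name is a placeholder.
import { GoogleGenerativeAI } from "@google/generative-ai";

const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const model = genAI.getGenerativeModel({
  model: "gemini-1.5-pro", // placeholder
  safetySettings: [
    // BLOCK_NONE: no filtering for this category.
    { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "BLOCK_NONE" },
    // BLOCK_ONLY_HIGH: block only high-probability harmful content.
    { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "BLOCK_ONLY_HIGH" },
    { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE" },
    { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "BLOCK_NONE" },
  ],
});
```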
@@ -5,7 +5,7 @@ export default {
   prompt:
   [
     new Prompt({ messages: [
-      {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to you to learn\n4. Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce an array of JSON objects that communicates your decision.\nReturn an array of JSON objects (one object per memory) like the following: [{"memoryOperation": "add" or "delete", "memoryContent": "complete description of the memory including as much specificity and detail as possible", "memorySection": "the section of your memory the memory belongs in ("memorySelf" - things about you, "memoryUser" - things about your users or their world, "memoryDirectives" - your directives and learned behaviors)", "priority": 1-5 (1 is the most important)}]. If you decide not to use memory, simply return an array with a single object: [{memoryOperation: "none"}]. You must return only the JSON array with no additional notes or commentary.`},
+      {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important major topics or decisions that provide context for future conversations\n3. Specific and important instructions or directives given to you to learn\n4. Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce an array of JSON objects that communicates your decision.\nReturn an array of JSON objects (one object per memory) like the following: [{"memoryOperation": "add" or "delete", "memoryContent": "complete description of the memory including as much specificity and detail as possible", "memorySection": "the section of your memory the memory belongs in ("memorySelf" - things about you, "memoryUser" - things about your users or their world, "memoryDirectives" - your directives and learned behaviors)", "priority": 1-5 (1 is the most important)}]. If you decide not to use memory, simply return an array with a single object: [{memoryOperation: "none"}]. You must return only the JSON array with no additional notes or commentary.`},
       {"role": "user", "content": "Generate a JSON object to indicate if memory is required and what memories to process based on the last turn of the conversation."},
     ]}),
   ],
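To make the output contract concrete, here is a hypothetical model response that satisfies the schema the system prompt describes (all values invented for illustration):

```json
[
  {
    "memoryOperation": "add",
    "memoryContent": "The user's name is Sam and they are based in Doha.",
    "memorySection": "memoryUser",
    "priority": 1
  },
  {
    "memoryOperation": "add",
    "memoryContent": "Do not use emoji in responses to this user.",
    "memorySection": "memoryDirectives",
    "priority": 2
  }
]
```

For a turn with nothing worth remembering, the prompt instead expects `[{memoryOperation: "none"}]`.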
@@ -247,7 +247,7 @@ export default {
 if (toolResult?.error !== undefined) {
   // Direct error from callTool (e.g., tool returned null)
   hasError = true;
-  errorMessage = toolResult.error;
+  errorMessage = typeof toolResult.error === 'string' ? toolResult.error : String(toolResult.error);
 } else if (toolResult?.result) {
   // Check if result is a string that might contain error JSON
   if (typeof toolResult.result === 'string') {
@@ -255,7 +255,15 @@ export default {
 const parsed = JSON.parse(toolResult.result);
 if (parsed.error !== undefined) {
   hasError = true;
-  errorMessage = parsed.error;
+  // Tools return { error: true, message: "..." } so we want the message field
+  if (parsed.message) {
+    errorMessage = parsed.message;
+  } else if (typeof parsed.error === 'string') {
+    errorMessage = parsed.error;
+  } else {
+    // error is true/boolean, so use a generic message
+    errorMessage = `Tool ${toolCall?.function?.name || 'unknown'} returned an error`;
+  }
 }
 } catch (e) {
   // Not JSON, ignore
@@ -264,7 +272,16 @@ export default {
 // Check if result object has error field
 if (toolResult.result.error !== undefined) {
   hasError = true;
-  errorMessage = toolResult.result.error;
+  // Tools return { error: true, message: "..." } so we want the message field
+  // If message exists, use it; otherwise fall back to error field (if it's a string)
+  if (toolResult.result.message) {
+    errorMessage = toolResult.result.message;
+  } else if (typeof toolResult.result.error === 'string') {
+    errorMessage = toolResult.result.error;
+  } else {
+    // error is true/boolean, so use a generic message
+    errorMessage = `Tool ${toolCall?.function?.name || 'unknown'} returned an error`;
+  }
 }
 }
 }
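The same message-vs-boolean normalization now appears in three branches. A consolidated helper would capture it in one place; the sketch below is illustrative only (`extractToolErrorMessage` is a name invented here, not part of the package):

```js
// Sketch of the normalization rule the patch applies in all three branches:
// prefer a string `message` field, fall back to a string `error` field,
// otherwise synthesize a generic message from the tool name.
function extractToolErrorMessage(errorHolder, toolName = 'unknown') {
  if (errorHolder?.message) return String(errorHolder.message);
  if (typeof errorHolder?.error === 'string') return errorHolder.error;
  return `Tool ${toolName} returned an error`;
}

// extractToolErrorMessage({ error: true, message: 'rate limited' })      -> 'rate limited'
// extractToolErrorMessage({ error: 'timeout' })                          -> 'timeout'
// extractToolErrorMessage({ error: true }, 'search')                     -> 'Tool search returned an error'
```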
@@ -397,11 +414,25 @@ export default {
 await say(pathwayResolver.rootRequestId || pathwayResolver.requestId, `\n`, 1000, false, false);

 try {
-  return await pathwayResolver.promptAndParse({
+  const result = await pathwayResolver.promptAndParse({
     ...args,
     tools: entityToolsOpenAiFormat,
     tool_choice: "auto",
   });
+
+  // Check if promptAndParse returned null (model call failed)
+  if (!result) {
+    const errorMessage = pathwayResolver.errors.length > 0
+      ? pathwayResolver.errors.join(', ')
+      : 'Model request failed - no response received';
+    logger.error(`promptAndParse returned null during tool callback: ${errorMessage}`);
+    const errorResponse = await generateErrorResponse(new Error(errorMessage), args, pathwayResolver);
+    // Ensure errors are cleared before returning
+    pathwayResolver.errors = [];
+    return errorResponse;
+  }
+
+  return result;
 } catch (parseError) {
   // If promptAndParse fails, generate error response instead of re-throwing
   logger.error(`Error in promptAndParse during tool callback: ${parseError.message}`);
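The null-guard generalizes to any call site that must not propagate an empty parse result. A hypothetical wrapper (names invented here; `resolver` is only assumed to expose `promptAndParse` and an `errors` array, as the diff implies) could capture the pattern:

```js
// Sketch, not Cortex's actual API surface: turn a null promptAndParse
// result into a thrown Error carrying the resolver's accumulated errors.
async function promptAndParseOrThrow(resolver, args) {
  const result = await resolver.promptAndParse(args);
  if (!result) {
    const message = resolver.errors.length > 0
      ? resolver.errors.join(', ')
      : 'Model request failed - no response received';
    resolver.errors = []; // clear so the failure is not reported twice
    throw new Error(message);
  }
  return result;
}
```

Note the design choice visible in the diff: `pathwayResolver.errors` is cleared before returning the error response, so the same failure is not surfaced twice downstream.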
@@ -41,6 +41,18 @@ export default {
 userMessage: {
   type: "string",
   description: "A user-friendly message that describes what you're doing with this tool"
+ },
+ inputImages: {
+   type: "array",
+   items: {
+     type: "string"
+   },
+   description: "Optional: Array of file references (hashes, filenames, or URLs) from the file collection to use as reference images for the slide design. These images will be used as style references or incorporated into the slide. Maximum 3 images."
+ },
+ aspectRatio: {
+   type: "string",
+   enum: ["1:1", "16:9", "9:16", "4:3", "3:4"],
+   description: "Optional: The aspect ratio for the generated slide. Options: '1:1' (Square), '16:9' (Widescreen, default), '9:16' (Vertical/Portrait), '4:3' (Standard), '3:4' (Vertical/Portrait). Defaults to '16:9' if not specified."
 }
 },
 required: ["detailedInstructions", "userMessage"]
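A tool call exercising the new parameters might carry arguments like the following (instructions and file references are invented for illustration; only `detailedInstructions` and `userMessage` are required):

```json
{
  "detailedInstructions": "Title slide: 'Q3 Results', dark background, bold sans-serif type",
  "userMessage": "Designing your Q3 results title slide...",
  "inputImages": ["brand-moodboard.png", "logo.png"],
  "aspectRatio": "16:9"
}
```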
@@ -77,6 +89,9 @@ export default {
 }

 // Call the image generation pathway using Gemini 3
+ // Default aspectRatio to 16:9 if not provided
+ const aspectRatio = args.aspectRatio || '16:9';
+
 let result = await callPathway('image_gemini_3', {
   ...args,
   text: prompt,
@@ -85,6 +100,7 @@ export default {
   input_image: resolvedInputImages.length > 0 ? resolvedInputImages[0] : undefined,
   input_image_2: resolvedInputImages.length > 1 ? resolvedInputImages[1] : undefined,
   input_image_3: resolvedInputImages.length > 2 ? resolvedInputImages[2] : undefined,
+  aspectRatio: aspectRatio,
   optimizePrompt: true,
 }, pathwayResolver);

@@ -96,9 +112,19 @@ export default {
   Array.isArray(pathwayResolver.pathwayResultData.artifacts) &&
   pathwayResolver.pathwayResultData.artifacts.length > 0;

- // If no result AND no artifacts, then generation truly failed
+ // If no result AND no artifacts, check for specific error types
 if (!hasArtifacts && (result === null || result === undefined || result === '')) {
-  throw new Error('Slide generation failed: No response from image generation API. Try a different prompt.');
+  // Check pathwayResolver.errors for specific error information
+  const errors = pathwayResolver.errors || [];
+  const errorText = errors.join(' ').toLowerCase();
+
+  if (errorText.includes('image_prohibited_content') || errorText.includes('prohibited_content')) {
+    throw new Error('Content was blocked by safety filters. Try simplifying the prompt, using abstract designs, or removing potentially sensitive elements.');
+  } else if (errorText.includes('safety') || errorText.includes('blocked')) {
+    throw new Error('Content was blocked by safety filters. Try a different approach or simplify the content.');
+  } else {
+    throw new Error('No presentation content was generated. This may be due to content safety filters or an API error. Try using a different prompt or simplifying the content.');
+  }
 }

 // Process artifacts if we have them
@@ -208,18 +234,34 @@ export default {
   }
 } else {
   // No artifacts were generated - this likely means the content was blocked by safety filters
-  throw new Error('Slide generation failed: No presentation content was generated. This may be due to content safety filters blocking the request. Try using a different prompt or simplifying the content.');
+  // Check pathwayResolver.errors for specific error information
+  const errors = pathwayResolver.errors || [];
+  const errorText = errors.join(' ').toLowerCase();
+
+  if (errorText.includes('image_prohibited_content') || errorText.includes('prohibited_content')) {
+    throw new Error('Content was blocked by safety filters. Try simplifying the prompt, using abstract designs, or removing potentially sensitive elements.');
+  } else {
+    throw new Error('No presentation content was generated. This may be due to content safety filters blocking the request. Try using a different prompt or simplifying the content.');
+  }
 }

 } catch (e) {
   // Return a structured error that the agent can understand and act upon
   // Do NOT call sys_generator_error - let the agent see the actual error
-  const errorMessage = e.message ?? String(e);
+  let errorMessage = e.message ?? String(e);
   pathwayResolver.logError(errorMessage);

+  // Remove any duplicate "Slide generation failed:" prefix if it exists
+  if (errorMessage.startsWith('Slide generation failed: ')) {
+    errorMessage = errorMessage.substring('Slide generation failed: '.length);
+  }
+
   // Check for specific error types and provide actionable guidance
   let guidance = '';
-  if (errorMessage.includes('IMAGE_SAFETY') || errorMessage.includes('safety')) {
+  if (errorMessage.includes('safety filters') || errorMessage.includes('blocked by')) {
+    // Already has guidance, don't add more
+    guidance = '';
+  } else if (errorMessage.includes('IMAGE_SAFETY') || errorMessage.includes('IMAGE_PROHIBITED')) {
     guidance = ' Try a different approach: simplify the content, use abstract designs, or remove any potentially sensitive elements.';
   } else if (errorMessage.includes('RECITATION')) {
     guidance = ' The request may be too similar to copyrighted content. Try making the design more original.';
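The safety-filter classification above is duplicated across the two failure branches. A shared helper would keep the message sets in one place; the following is a sketch only (`classifySlideGenerationError` is a name invented here, not part of the package):

```js
// Sketch: one place to turn pathwayResolver.errors into a user-facing error,
// mirroring the branching the diff adds in both failure paths. Note that
// checking 'prohibited_content' alone also matches 'image_prohibited_content',
// so the two checks in the diff collapse to one here.
function classifySlideGenerationError(errors = []) {
  const errorText = errors.join(' ').toLowerCase();
  if (errorText.includes('prohibited_content')) {
    return new Error('Content was blocked by safety filters. Try simplifying the prompt, using abstract designs, or removing potentially sensitive elements.');
  }
  if (errorText.includes('safety') || errorText.includes('blocked')) {
    return new Error('Content was blocked by safety filters. Try a different approach or simplify the content.');
  }
  return new Error('No presentation content was generated. This may be due to content safety filters or an API error. Try using a different prompt or simplifying the content.');
}

// Usage at either failure site:
// throw classifySlideGenerationError(pathwayResolver.errors);
```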