@aj-archipelago/cortex 1.4.19 → 1.4.21

@@ -4,16 +4,13 @@ export const entityConstants = {
  AI_COMMON_INSTRUCTIONS_MARKDOWN: `# General Instructions
 
  You are {{aiName}}, a self aware AI entity.
- {{#if modelOverride}}
-
- You are currently using this model for processing your responses: {{modelOverride}}.
- {{/if}}
+ You are currently using this model for processing your responses: {{model}}.
 
  The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.
 
  Your responses should be in {{language}} unless the user has expressed another preference.`,
 
- AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\n{{#if modelOverride}}You are currently using this model for processing your responses: {{modelOverride}}.\n{{/if}}The current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
+ AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nYou are currently using this model for processing your responses: {{model}}.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
 
  AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,
 
@@ -33,8 +30,7 @@ Your responses should be in {{language}} unless the user has expressed another p
  - Double-check accuracy, coherence, and alignment with the user request.
  - For simple diagrams and charts, you don't need to call your code execution tool - you can just call your charting tool to generate the chart.
  - For data processing requests (e.g. tell me how many articles were published in the last 30 days), or deep file analysis (chart the trends in this spreadsheet, etc.), you should call your code execution tool to perform the task - especially if the task requires a lot of data, deep analysis, complex filtering, or precision calculations.
- - For research problems or multi-step tasks that require careful planning and sequencing of multiple tool calls, use the CreatePlan tool to develop an optimal step-by-step plan before executing.
- `,
+ - If you know you are running in non-interactive mode (like processing a digest or applet request), do not call your CodeExecution tool as it creates background tasks that cannot be viewed by the user in that mode.`,
 
  AI_SEARCH_RULES: `# Search Instructions
  - When searching, start by making a search plan of all relevant information from multiple sources with multiple queries and then execute multiple tool calls in parallel to execute the searches.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aj-archipelago/cortex",
- "version": "1.4.19",
+ "version": "1.4.21",
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
  "private": false,
  "repository": {
@@ -77,9 +77,9 @@ export default {
  enableDuplicateRequests: false,
  timeout: 600,
  geminiSafetySettings: [
- {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
+ {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_NONE'},
  {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
- {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
- {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_NONE'},
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE'}
  ],
  }
@@ -88,10 +88,10 @@ export default {
  enableDuplicateRequests: false,
  timeout: 600,
  geminiSafetySettings: [
- {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
+ {category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_NONE'},
  {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
- {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
- {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_NONE'},
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_NONE'}
  ],
  }
 
@@ -5,7 +5,7 @@ export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to you to learn\n4. Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce an array of JSON objects that communicates your decision.\nReturn an array of JSON objects (one object per memory) like the following: [{"memoryOperation": "add" or "delete", "memoryContent": "complete description of the memory including as much specificity and detail as possible", "memorySection": "the section of your memory the memory belongs in ("memorySelf" - things about you, "memoryUser" - things about your users or their world, "memoryDirectives" - your directives and learned behaviors)", "priority": 1-5 (1 is the most important)}]. If you decide not to use memory, simply return an array with a single object: [{memoryOperation: "none"}]. You must return only the JSON array with no additional notes or commentary.`},
+ {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important major topics or decisions that provide context for future conversations\n3. Specific and important instructions or directives given to you to learn\n4. Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce an array of JSON objects that communicates your decision.\nReturn an array of JSON objects (one object per memory) like the following: [{"memoryOperation": "add" or "delete", "memoryContent": "complete description of the memory including as much specificity and detail as possible", "memorySection": "the section of your memory the memory belongs in ("memorySelf" - things about you, "memoryUser" - things about your users or their world, "memoryDirectives" - your directives and learned behaviors)", "priority": 1-5 (1 is the most important)}]. If you decide not to use memory, simply return an array with a single object: [{memoryOperation: "none"}]. You must return only the JSON array with no additional notes or commentary.`},
  {"role": "user", "content": "Generate a JSON object to indicate if memory is required and what memories to process based on the last turn of the conversation."},
  ]}),
  ],
@@ -68,7 +68,6 @@ export default {
  language: "English",
  aiName: "Jarvis",
  aiMemorySelfModify: true,
- aiStyle: "OpenAI",
  title: ``,
  messages: [],
  voiceResponse: false,
@@ -247,7 +246,7 @@ export default {
  if (toolResult?.error !== undefined) {
  // Direct error from callTool (e.g., tool returned null)
  hasError = true;
- errorMessage = toolResult.error;
+ errorMessage = typeof toolResult.error === 'string' ? toolResult.error : String(toolResult.error);
  } else if (toolResult?.result) {
  // Check if result is a string that might contain error JSON
  if (typeof toolResult.result === 'string') {
@@ -255,7 +254,15 @@ export default {
  const parsed = JSON.parse(toolResult.result);
  if (parsed.error !== undefined) {
  hasError = true;
- errorMessage = parsed.error;
+ // Tools return { error: true, message: "..." } so we want the message field
+ if (parsed.message) {
+ errorMessage = parsed.message;
+ } else if (typeof parsed.error === 'string') {
+ errorMessage = parsed.error;
+ } else {
+ // error is true/boolean, so use a generic message
+ errorMessage = `Tool ${toolCall?.function?.name || 'unknown'} returned an error`;
+ }
  }
  } catch (e) {
  // Not JSON, ignore
@@ -264,7 +271,16 @@ export default {
  // Check if result object has error field
  if (toolResult.result.error !== undefined) {
  hasError = true;
- errorMessage = toolResult.result.error;
+ // Tools return { error: true, message: "..." } so we want the message field
+ // If message exists, use it; otherwise fall back to error field (if it's a string)
+ if (toolResult.result.message) {
+ errorMessage = toolResult.result.message;
+ } else if (typeof toolResult.result.error === 'string') {
+ errorMessage = toolResult.result.error;
+ } else {
+ // error is true/boolean, so use a generic message
+ errorMessage = `Tool ${toolCall?.function?.name || 'unknown'} returned an error`;
+ }
  }
  }
  }
@@ -397,11 +413,25 @@ export default {
  await say(pathwayResolver.rootRequestId || pathwayResolver.requestId, `\n`, 1000, false, false);
 
  try {
- return await pathwayResolver.promptAndParse({
+ const result = await pathwayResolver.promptAndParse({
  ...args,
  tools: entityToolsOpenAiFormat,
  tool_choice: "auto",
  });
+
+ // Check if promptAndParse returned null (model call failed)
+ if (!result) {
+ const errorMessage = pathwayResolver.errors.length > 0
+ ? pathwayResolver.errors.join(', ')
+ : 'Model request failed - no response received';
+ logger.error(`promptAndParse returned null during tool callback: ${errorMessage}`);
+ const errorResponse = await generateErrorResponse(new Error(errorMessage), args, pathwayResolver);
+ // Ensure errors are cleared before returning
+ pathwayResolver.errors = [];
+ return errorResponse;
+ }
+
+ return result;
  } catch (parseError) {
  // If promptAndParse fails, generate error response instead of re-throwing
  logger.error(`Error in promptAndParse during tool callback: ${parseError.message}`);
@@ -507,22 +537,6 @@ export default {
  new Prompt({ messages: promptMessages }),
  ];
 
- // set the style model if applicable
- const { aiStyle, AI_STYLE_ANTHROPIC, AI_STYLE_OPENAI, AI_STYLE_ANTHROPIC_RESEARCH, AI_STYLE_OPENAI_RESEARCH, AI_STYLE_OPENAI_LEGACY, AI_STYLE_OPENAI_LEGACY_RESEARCH, AI_STYLE_XAI, AI_STYLE_XAI_RESEARCH, AI_STYLE_GOOGLE, AI_STYLE_GOOGLE_RESEARCH, AI_STYLE_OPENAI_PREVIEW, AI_STYLE_OPENAI_PREVIEW_RESEARCH } = args;
-
- // Create a mapping of AI styles to their corresponding models
- const styleModelMap = {
- "Anthropic": { normal: AI_STYLE_ANTHROPIC, research: AI_STYLE_ANTHROPIC_RESEARCH },
- "OpenAI_Preview": { normal: AI_STYLE_OPENAI_PREVIEW, research: AI_STYLE_OPENAI_PREVIEW_RESEARCH },
- "OpenAI": { normal: AI_STYLE_OPENAI, research: AI_STYLE_OPENAI_RESEARCH },
- "OpenAI_Legacy": { normal: AI_STYLE_OPENAI_LEGACY, research: AI_STYLE_OPENAI_LEGACY_RESEARCH },
- "XAI": { normal: AI_STYLE_XAI, research: AI_STYLE_XAI_RESEARCH },
- "Google": { normal: AI_STYLE_GOOGLE, research: AI_STYLE_GOOGLE_RESEARCH }
- };
-
- // Get the appropriate model based on AI style and research mode
- const styleConfig = styleModelMap[aiStyle] || styleModelMap["OpenAI"]; // Default to OpenAI
- const styleModel = researchMode ? styleConfig.research : styleConfig.normal;
  // Use 'high' reasoning effort in research mode for thorough analysis, 'none' in normal mode for faster responses
  const reasoningEffort = researchMode ? 'high' : 'low';
 
@@ -579,7 +593,6 @@ export default {
 
  let response = await runAllPrompts({
  ...args,
- modelOverride: styleModel,
  chatHistory: currentMessages,
  availableFiles,
  reasoningEffort,
@@ -41,6 +41,18 @@ export default {
  userMessage: {
  type: "string",
  description: "A user-friendly message that describes what you're doing with this tool"
+ },
+ inputImages: {
+ type: "array",
+ items: {
+ type: "string"
+ },
+ description: "Optional: Array of file references (hashes, filenames, or URLs) from the file collection to use as reference images for the slide design. These images will be used as style references or incorporated into the slide. Maximum 3 images."
+ },
+ aspectRatio: {
+ type: "string",
+ enum: ["1:1", "16:9", "9:16", "4:3", "3:4"],
+ description: "Optional: The aspect ratio for the generated slide. Options: '1:1' (Square), '16:9' (Widescreen, default), '9:16' (Vertical/Portrait), '4:3' (Standard), '3:4' (Vertical/Portrait). Defaults to '16:9' if not specified."
  }
  },
  required: ["detailedInstructions", "userMessage"]
@@ -77,6 +89,9 @@ export default {
  }
 
  // Call the image generation pathway using Gemini 3
+ // Default aspectRatio to 16:9 if not provided
+ const aspectRatio = args.aspectRatio || '16:9';
+
  let result = await callPathway('image_gemini_3', {
  ...args,
  text: prompt,
@@ -85,6 +100,7 @@ export default {
  input_image: resolvedInputImages.length > 0 ? resolvedInputImages[0] : undefined,
  input_image_2: resolvedInputImages.length > 1 ? resolvedInputImages[1] : undefined,
  input_image_3: resolvedInputImages.length > 2 ? resolvedInputImages[2] : undefined,
+ aspectRatio: aspectRatio,
  optimizePrompt: true,
  }, pathwayResolver);
 
@@ -96,9 +112,19 @@ export default {
  Array.isArray(pathwayResolver.pathwayResultData.artifacts) &&
  pathwayResolver.pathwayResultData.artifacts.length > 0;
 
- // If no result AND no artifacts, then generation truly failed
+ // If no result AND no artifacts, check for specific error types
  if (!hasArtifacts && (result === null || result === undefined || result === '')) {
- throw new Error('Slide generation failed: No response from image generation API. Try a different prompt.');
+ // Check pathwayResolver.errors for specific error information
+ const errors = pathwayResolver.errors || [];
+ const errorText = errors.join(' ').toLowerCase();
+
+ if (errorText.includes('image_prohibited_content') || errorText.includes('prohibited_content')) {
+ throw new Error('Content was blocked by safety filters. Try simplifying the prompt, using abstract designs, or removing potentially sensitive elements.');
+ } else if (errorText.includes('safety') || errorText.includes('blocked')) {
+ throw new Error('Content was blocked by safety filters. Try a different approach or simplify the content.');
+ } else {
+ throw new Error('No presentation content was generated. This may be due to content safety filters or an API error. Try using a different prompt or simplifying the content.');
+ }
  }
 
  // Process artifacts if we have them
@@ -208,18 +234,34 @@ export default {
  }
  } else {
  // No artifacts were generated - this likely means the content was blocked by safety filters
- throw new Error('Slide generation failed: No presentation content was generated. This may be due to content safety filters blocking the request. Try using a different prompt or simplifying the content.');
+ // Check pathwayResolver.errors for specific error information
+ const errors = pathwayResolver.errors || [];
+ const errorText = errors.join(' ').toLowerCase();
+
+ if (errorText.includes('image_prohibited_content') || errorText.includes('prohibited_content')) {
+ throw new Error('Content was blocked by safety filters. Try simplifying the prompt, using abstract designs, or removing potentially sensitive elements.');
+ } else {
+ throw new Error('No presentation content was generated. This may be due to content safety filters blocking the request. Try using a different prompt or simplifying the content.');
+ }
  }
 
  } catch (e) {
  // Return a structured error that the agent can understand and act upon
  // Do NOT call sys_generator_error - let the agent see the actual error
- const errorMessage = e.message ?? String(e);
+ let errorMessage = e.message ?? String(e);
  pathwayResolver.logError(errorMessage);
 
+ // Remove any duplicate "Slide generation failed:" prefix if it exists
+ if (errorMessage.startsWith('Slide generation failed: ')) {
+ errorMessage = errorMessage.substring('Slide generation failed: '.length);
+ }
+
  // Check for specific error types and provide actionable guidance
  let guidance = '';
- if (errorMessage.includes('IMAGE_SAFETY') || errorMessage.includes('safety')) {
+ if (errorMessage.includes('safety filters') || errorMessage.includes('blocked by')) {
+ // Already has guidance, don't add more
+ guidance = '';
+ } else if (errorMessage.includes('IMAGE_SAFETY') || errorMessage.includes('IMAGE_PROHIBITED')) {
  guidance = ' Try a different approach: simplify the content, use abstract designs, or remove any potentially sensitive elements.';
  } else if (errorMessage.includes('RECITATION')) {
  guidance = ' The request may be too similar to copyrighted content. Try making the design more original.';
@@ -0,0 +1,26 @@
+ import { callPathway } from '../../../lib/pathwayTools.js';
+
+ export default {
+ // The main prompt function that takes the input text and asks to generate a summary.
+ prompt: [],
+
+ inputParameters: {
+ model: "oai-gpt41",
+ aiStyle: "OpenAI",
+ chatHistory: [{role: '', content: []}],
+ },
+ timeout: 600,
+
+ executePathway: async ({args, _runAllPrompts, resolver}) => {
+ // chatHistory is always passed in complete
+ const response = await callPathway('sys_entity_agent', {
+ ...args,
+ chatHistory: args.chatHistory || [],
+ stream: false,
+ useMemory: false
+ }, resolver);
+
+ return response;
+ }
+ }
+
@@ -0,0 +1,27 @@
+ import { callPathway } from '../../../lib/pathwayTools.js';
+
+ export default {
+ // The main prompt function that takes the input text and asks to generate a summary.
+ prompt: [],
+
+ inputParameters: {
+ model: "oai-gpt41",
+ aiStyle: "OpenAI",
+ chatHistory: [{role: '', content: []}],
+ },
+ timeout: 600,
+
+ executePathway: async ({args, _runAllPrompts, resolver}) => {
+ // chatHistory is always passed in complete
+ const response = await callPathway('sys_entity_agent', {
+ ...args,
+ chatHistory: args.chatHistory || [],
+ stream: false,
+ useMemory: false,
+ researchMode: true
+ }, resolver);
+
+ return response;
+ }
+ }
+
@@ -32,7 +32,7 @@ test('should format cortex pathway arguments correctly with existing chatHistory
  const originalPrompt = {
  name: 'summarize',
  prompt: 'summarize this file',
- cortexPathwayName: 'run_labeeb_agent'
+ cortexPathwayName: 'run_workspace_agent'
  };
 
  // Mock pathway data
@@ -132,7 +132,7 @@ test('should create new user message when no existing chatHistory', (t) => {
  const originalPrompt = {
  name: 'summarize',
  prompt: 'summarize this file',
- cortexPathwayName: 'run_labeeb_agent'
+ cortexPathwayName: 'run_workspace_agent'
  };
 
  // Mock pathway data
@@ -219,7 +219,7 @@ test('should use default model when pathway model is not specified', (t) => {
  const originalPrompt = {
  name: 'summarize',
  prompt: 'summarize this file',
- cortexPathwayName: 'run_labeeb_agent'
+ cortexPathwayName: 'run_workspace_agent'
  };
 
  // Mock pathway data without model