@aj-archipelago/cortex 1.3.65 → 1.3.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/helper-apps/cortex-autogen2/Dockerfile +88 -21
  2. package/helper-apps/cortex-autogen2/docker-compose.yml +15 -8
  3. package/helper-apps/cortex-autogen2/host.json +5 -0
  4. package/helper-apps/cortex-autogen2/pyproject.toml +82 -25
  5. package/helper-apps/cortex-autogen2/requirements.txt +84 -14
  6. package/helper-apps/cortex-autogen2/services/redis_publisher.py +129 -3
  7. package/helper-apps/cortex-autogen2/task_processor.py +432 -116
  8. package/helper-apps/cortex-autogen2/tools/__init__.py +2 -0
  9. package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +32 -0
  10. package/helper-apps/cortex-autogen2/tools/azure_foundry_agents.py +50 -14
  11. package/helper-apps/cortex-autogen2/tools/file_tools.py +169 -44
  12. package/helper-apps/cortex-autogen2/tools/google_cse.py +117 -0
  13. package/helper-apps/cortex-autogen2/tools/search_tools.py +655 -98
  14. package/lib/entityConstants.js +1 -1
  15. package/lib/pathwayManager.js +42 -8
  16. package/lib/pathwayTools.js +3 -3
  17. package/lib/util.js +58 -2
  18. package/package.json +1 -1
  19. package/pathways/system/entity/memory/sys_memory_format.js +1 -0
  20. package/pathways/system/entity/memory/sys_memory_manager.js +3 -3
  21. package/pathways/system/entity/sys_entity_start.js +1 -1
  22. package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +2 -0
  23. package/pathways/system/entity/tools/sys_tool_codingagent.js +2 -2
  24. package/pathways/system/entity/tools/sys_tool_google_search.js +3 -3
  25. package/pathways/system/entity/tools/sys_tool_grok_x_search.js +12 -2
  26. package/pathways/system/workspaces/run_workspace_prompt.js +0 -3
  27. package/server/executeWorkspace.js +381 -0
  28. package/server/graphql.js +5 -180
  29. package/server/pathwayResolver.js +3 -3
  30. package/server/plugins/apptekTranslatePlugin.js +2 -2
  31. package/server/plugins/azureFoundryAgentsPlugin.js +1 -1
  32. package/tests/unit/core/parser.test.js +0 -1
  33. package/tests/unit/core/pathwayManagerWithFiles.test.js +256 -0
  34. package/tests/unit/graphql_executeWorkspace_transformation.test.js +244 -0
  35. package/tests/unit/server/graphql.test.js +122 -1
@@ -15,7 +15,7 @@ The user is using a UI to interact with you that you have knowledge of and some
15
15
 
16
16
  Your responses should be in {{language}} unless the user has expressed another preference.`,
17
17
 
18
- AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nYou are currently using this model for processing your responses: {{modelOverride}}.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
18
+ AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\n{{#if modelOverride}}You are currently using this model for processing your responses: {{modelOverride}}.\n{{/if}}The current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
19
19
 
20
20
  AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,
21
21
 
@@ -338,36 +338,58 @@ class PathwayManager {
338
338
 
339
339
  /**
340
340
  * Creates a Prompt object from a prompt item and system prompt.
341
- * @param {(string|Object)} promptItem - The prompt item (string or {name, prompt} object).
341
+ * @param {(string|Object)} promptItem - The prompt item (string or {name, prompt, files} object).
342
342
  * @param {string} systemPrompt - The system prompt to prepend.
343
343
  * @param {string} [defaultName] - Default name to use if promptItem is a string or if the object doesn't have a name.
344
344
  * @returns {Prompt} A new Prompt object.
345
345
  */
346
346
  _createPromptObject(promptItem, systemPrompt, defaultName = null) {
347
- // Handle both old format (strings) and new format (objects with name and prompt)
347
+ // Handle both old format (strings) and new format (objects with name, prompt, and files)
348
348
  const promptText = typeof promptItem === 'string' ? promptItem : promptItem.prompt;
349
349
  const promptName = typeof promptItem === 'string' ? defaultName : (promptItem.name || defaultName);
350
+ const promptFiles = typeof promptItem === 'string' ? [] : (promptItem.files || []);
351
+ const cortexPathwayName = typeof promptItem === 'string' ? null : (promptItem.cortexPathwayName || null);
350
352
 
351
- const messages = [
352
- // Add the original prompt as a user message
353
- { "role": "user", "content": `{{text}}\n\n${promptText}` },
354
- ];
353
+ const messages = [];
355
354
 
356
355
  // Only include system message if systemPrompt has content
357
356
  if (systemPrompt && systemPrompt.trim() !== "") {
358
357
  messages.unshift({ "role": "system", "content": systemPrompt });
359
358
  }
360
359
 
361
- return new Prompt({
360
+ // If there are files, include chatHistory to get the file content, then add user message
361
+ if (promptFiles.length > 0) {
362
+ // Add chatHistory which will contain the resolved file content
363
+ messages.push("{{chatHistory}}");
364
+ // Add the user text and prompt as a separate user message
365
+ messages.push({ "role": "user", "content": `{{text}}\n\n${promptText}` });
366
+ } else {
367
+ // No files, just add the user message with text and prompt
368
+ messages.push({ "role": "user", "content": `{{text}}\n\n${promptText}` });
369
+ }
370
+
371
+ const prompt = new Prompt({
362
372
  name: promptName,
363
373
  messages: messages
364
374
  });
375
+
376
+ // Store file hashes for later resolution
377
+ if (promptFiles.length > 0) {
378
+ prompt.fileHashes = promptFiles;
379
+ }
380
+
381
+ // Preserve cortexPathwayName if present
382
+ if (cortexPathwayName) {
383
+ prompt.cortexPathwayName = cortexPathwayName;
384
+ }
385
+
386
+ return prompt;
365
387
  }
366
388
 
367
389
  /**
368
390
  * Transforms the prompts in a pathway to include the system prompt.
369
391
  * @param {Object} pathway - The pathway object to transform.
370
- * @param {(string[]|Object[])} pathway.prompt - Array of user prompts (strings) or prompt objects with {name, prompt} properties.
392
+ * @param {(string[]|Object[])} pathway.prompt - Array of user prompts (strings) or prompt objects with {name, prompt, files} properties.
371
393
  * @param {string} pathway.systemPrompt - The system prompt to prepend to each user prompt.
372
394
  * @returns {Object} A new pathway object with transformed prompts.
373
395
  */
@@ -379,6 +401,16 @@ class PathwayManager {
379
401
  // Transform each prompt in the array
380
402
  newPathway.prompt = prompt.map(p => this._createPromptObject(p, systemPrompt));
381
403
 
404
+ // Collect all file hashes from all prompts
405
+ const allFileHashes = newPathway.prompt
406
+ .filter(p => p.fileHashes && p.fileHashes.length > 0)
407
+ .flatMap(p => p.fileHashes);
408
+
409
+ // Store file hashes at pathway level for later resolution
410
+ if (allFileHashes.length > 0) {
411
+ newPathway.fileHashes = [...new Set(allFileHashes)]; // Remove duplicates
412
+ }
413
+
382
414
  return newPathway;
383
415
  }
384
416
 
@@ -426,6 +458,8 @@ class PathwayManager {
426
458
  input PromptInput {
427
459
  name: String!
428
460
  prompt: String!
461
+ files: [String!]
462
+ cortexPathwayName: String
429
463
  }
430
464
 
431
465
  input PathwayInput {
@@ -147,16 +147,16 @@ const callTool = async (toolName, args, toolDefinitions, pathwayResolver) => {
147
147
  result: parsedResult,
148
148
  toolImages: toolImages
149
149
  };
150
- logger.debug(`callTool: ${toolName} completed successfully, returning:`, {
150
+ logger.debug(`callTool: ${toolName} completed successfully, returning: ${JSON.stringify({
151
151
  hasResult: !!finalResult.result,
152
152
  hasToolImages: !!finalResult.toolImages,
153
153
  toolImagesLength: finalResult.toolImages?.length || 0
154
- });
154
+ })}`);
155
155
  return finalResult;
156
156
  } catch (error) {
157
157
  logger.error(`Error calling tool ${toolName}: ${error.message}`);
158
158
  const errorResult = { error: error.message };
159
- logger.debug(`callTool: ${toolName} failed, returning error:`, errorResult);
159
+ logger.debug(`callTool: ${toolName} failed, returning error: ${JSON.stringify(errorResult)}`);
160
160
  return errorResult;
161
161
  }
162
162
  }
package/lib/util.js CHANGED
@@ -216,7 +216,7 @@ async function markCompletedForCleanUp(requestId) {
216
216
  if (MEDIA_API_URL) {
217
217
  //call helper api to mark processing as completed
218
218
  const res = await axios.delete(MEDIA_API_URL, { params: { requestId } });
219
- logger.info(`Marked request ${requestId} as completed:`, res.data);
219
+ logger.info(`Marked request ${requestId} as completed: ${JSON.stringify(res.data)}`);
220
220
  return res.data;
221
221
  }
222
222
  } catch (err) {
@@ -412,6 +412,61 @@ function getAvailableFiles(chatHistory) {
412
412
  return availableFiles;
413
413
  }
414
414
 
415
+ /**
416
+ * Convert file hashes to content format suitable for LLM processing
417
+ * @param {Array<string>} fileHashes - Array of file hashes to resolve
418
+ * @param {Object} config - Configuration object with file service endpoints
419
+ * @returns {Promise<Array<string>>} Array of stringified file content objects
420
+ */
421
+ async function resolveFileHashesToContent(fileHashes, config) {
422
+ if (!fileHashes || fileHashes.length === 0) return [];
423
+
424
+ const fileContentPromises = fileHashes.map(async (hash) => {
425
+ try {
426
+ // Use the existing file handler (cortex-file-handler) to resolve file hashes
427
+ const fileHandlerUrl = config?.get?.('whisperMediaApiUrl');
428
+
429
+ if (fileHandlerUrl && fileHandlerUrl !== 'null') {
430
+ // Make request to file handler to get file content by hash
431
+ const response = await axios.get(fileHandlerUrl, {
432
+ params: { hash: hash, checkHash: true }
433
+ });
434
+ if (response.status === 200) {
435
+ const fileData = response.data;
436
+ const fileUrl = fileData.shortLivedUrl || fileData.url;
437
+ const convertedUrl = fileData.converted?.url;
438
+ const convertedGcsUrl = fileData.converted?.gcs;
439
+
440
+ return JSON.stringify({
441
+ type: "image_url",
442
+ url: convertedUrl,
443
+ image_url: { url: convertedUrl },
444
+ gcs: convertedGcsUrl || fileData.gcs, // Add GCS URL for Gemini models
445
+ originalFilename: fileData.filename,
446
+ hash: hash
447
+ });
448
+ }
449
+ }
450
+
451
+ // Fallback: create a placeholder that indicates file resolution is needed
452
+ return JSON.stringify({
453
+ type: "file_hash",
454
+ hash: hash,
455
+ _cortex_needs_resolution: true
456
+ });
457
+ } catch (error) {
458
+ // Return error indicator
459
+ return JSON.stringify({
460
+ type: "file_error",
461
+ hash: hash,
462
+ error: error.message
463
+ });
464
+ }
465
+ });
466
+
467
+ return Promise.all(fileContentPromises);
468
+ }
469
+
415
470
  export {
416
471
  getUniqueId,
417
472
  getSearchResultId,
@@ -426,5 +481,6 @@ export {
426
481
  getMediaChunks,
427
482
  markCompletedForCleanUp,
428
483
  removeOldImageAndFileContent,
429
- getAvailableFiles
484
+ getAvailableFiles,
485
+ resolveFileHashesToContent
430
486
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.3.65",
3
+ "version": "1.3.67",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -26,5 +26,6 @@ export default {
26
26
  inputChunkSize: 1000,
27
27
  useParallelChunkProcessing: true,
28
28
  enableDuplicateRequests: false,
29
+ requestLoggingDisabled: true,
29
30
  timeout: 300,
30
31
  }
@@ -37,7 +37,7 @@ export default {
37
37
  try {
38
38
  parsedMemory = JSON.parse(await callPathway('sys_read_memory', { ...args, section: 'memoryAll' }));
39
39
  } catch (error) {
40
- logger.error('Error in memory manager:', error);
40
+ logger.error(`Error in memory manager: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
41
41
  return "";
42
42
  }
43
43
 
@@ -123,12 +123,12 @@ export default {
123
123
  return "";
124
124
 
125
125
  } catch (e) {
126
- logger.warn(`sys_memory_required returned invalid JSON: ${JSON.stringify(memoryRequired)}`);
126
+ logger.warn(`sys_memory_required returned invalid JSON: ${JSON.stringify(memoryRequired)}, error: ${e instanceof Error ? e.stack || e.message : JSON.stringify(e)}`);
127
127
  return "";
128
128
  }
129
129
 
130
130
  } catch (e) {
131
- logger.error('Error in memory manager:', e);
131
+ logger.error(`Error in memory manager: ${e instanceof Error ? e.stack || e.message : JSON.stringify(e)}`);
132
132
  resolver.logError(e);
133
133
  return "";
134
134
  }
@@ -29,7 +29,7 @@ async function sendMessageToQueue(data) {
29
29
  logger.info(`Message added to queue: ${JSON.stringify(result)}`);
30
30
  return result.messageId;
31
31
  } catch (error) {
32
- logger.error("Error sending message:", error);
32
+ logger.error(`Error sending message: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
33
33
  }
34
34
  }
35
35
 
@@ -8,6 +8,7 @@ import { getSearchResultId } from '../../../../lib/util.js';
8
8
  export default {
9
9
  prompt: [],
10
10
  timeout: 300,
11
+ /*
11
12
  toolDefinition: {
12
13
  type: "function",
13
14
  icon: "🌐",
@@ -46,6 +47,7 @@ export default {
46
47
  }
47
48
  }
48
49
  },
50
+ */
49
51
 
50
52
  executePathway: async ({args, runAllPrompts, resolver}) => {
51
53
 
@@ -26,7 +26,7 @@ async function sendMessageToQueue(data) {
26
26
  logger.info(`Message added to queue: ${JSON.stringify(result)}`);
27
27
  return result.messageId;
28
28
  } catch (error) {
29
- logger.error("Error sending message:", error);
29
+ logger.error(`Error sending message: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
30
30
  throw error;
31
31
  }
32
32
  }
@@ -103,7 +103,7 @@ export default {
103
103
 
104
104
  return userMessage || "I've started working on your coding task. I'll let you know when it's complete.";
105
105
  } catch (error) {
106
- logger.error("Error in coding agent tool:", error);
106
+ logger.error(`Error in coding agent tool: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
107
107
  throw error;
108
108
  }
109
109
  }
@@ -10,10 +10,10 @@ export default {
10
10
  timeout: 300,
11
11
  toolDefinition: {
12
12
  type: "function",
13
- icon: "🔎",
13
+ icon: "🌐",
14
14
  function: {
15
- name: "SearchInternetGoogle",
16
- description: "Search the web using Google Custom Search (CSE). This is a simple pass-through tool: it calls Google CSE with your parameters and returns normalized results with unique IDs for citation. Prefer strict time filters and reputable sources via CSE parameters.",
15
+ name: "SearchInternet",
16
+ description: "Search the internet for current knowledge and events. This is a simple pass-through tool: it calls Google CSE with your parameters and returns normalized results with unique IDs for citation. Prefer strict time filters and reputable sources via CSE parameters.",
17
17
  parameters: {
18
18
  type: "object",
19
19
  properties: {
@@ -45,14 +45,24 @@ export default {
45
45
  description: "Optional array of X handles to exclude from search. Maximum 10 handles. Cannot be used in conjunction with includedHandles.",
46
46
  maxItems: 10
47
47
  },
48
+ fromDate: {
49
+ type: "string",
50
+ description: "Optional date from which to start searching (YYYY-MM-DD)",
51
+ format: "date"
52
+ },
53
+ toDate: {
54
+ type: "string",
55
+ description: "Optional date to which to end searching (YYYY-MM-DD)",
56
+ format: "date"
57
+ },
48
58
  minFavorites: {
49
59
  type: "number",
50
- description: "Popularity filter: Minimum number of favorites (likes) for posts to include",
60
+ description: "Minimum number of favorites (likes) that a post must have to be included. Use this to filter to most liked posts.",
51
61
  minimum: 0
52
62
  },
53
63
  minViews: {
54
64
  type: "number",
55
- description: "Popularity filter: Minimum number of views for posts to include",
65
+ description: "Minimum number of views that a post must have to be included. Use this to filter to most viewed posts.",
56
66
  minimum: 0
57
67
  },
58
68
  maxResults: {
@@ -79,9 +79,6 @@ export default {
79
79
  try {
80
80
  let currentMessages = JSON.parse(JSON.stringify(args.chatHistory));
81
81
 
82
- console.log("currentMessages", currentMessages);
83
- console.log("args", args);
84
-
85
82
  let response = await runAllPrompts({
86
83
  ...args,
87
84
  chatHistory: currentMessages,