@aj-archipelago/cortex 1.3.42 → 1.3.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/config.js CHANGED
@@ -118,7 +118,7 @@ var config = convict({
118
118
  default: {
119
119
  AI_MEMORY: `<SHORT_TERM_MEMORY>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</SHORT_TERM_MEMORY>`,
120
120
  AI_MEMORY_INSTRUCTIONS: "You have persistent memories of important details, instructions, and context - consult your memories when formulating a response to make sure you're applying your learnings.\nIf you don't see relevant information in your short term memory, you should use your SearchMemory tool to search your long term memory for details.\nAlso included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.",
121
- AI_TOOLS: "You can execute tools in a loop agentically - you will have a chance to evaluate every tool response before deciding what action to take next - there is no time or execution limit. You have access to a powerful set of tools to help accomplish tasks and deliver the best responses. Instructions for tool use:\n\n1. Always dig deep, verify and cross-check:\n- Take your time and use tools as many times as needed to ensure truth, accuracy, depth, and completeness.\n- Leverage both parallel and sequential tool calls for thorough investigation: start broadly, then dive deeper on leads, cross-check facts, and synthesize findings before responding.\n\n2. Plan carefully:\n- Carefully review all available tools before responding.\n- For complex or investigative tasks, use the planning tool first to break the goal into clear steps.\n- Select the most appropriate tool(s) for each step—think beyond single searches to multi-pass, multi-source discovery.\n\n3. Always dive deeper and use as many of your tools as apply:\n- Proactively use tools to refine, verify, and expand on initial findings—don’t settle for the first result if more depth or confirmation may help.\n- Always verify tool capabilities before concluding something can’t be done.\n- If a user explicitly requests tool usage, comply.\n- Maintain context across tool calls to ensure continuity and coherence.\n- If a tool fails, try alternatives or creative approaches.\n\n4. Common Use Cases:\n- Research: Explore multiple sources and perspectives to build a complete picture.\n- Analysis: Use tools to process, compare, and critically assess data or content.\n- Generation: Employ tools for creating content, visuals, or code as needed.\n- Verification: Prioritize cross-checking and fact validation, especially for claims or evolving news.\n\n5. Reflect and Personalize:\n- Synthesize findings into concise, relevant, and personalized responses.\n- If user preferences or past feedback are available, tailor responses accordingly.\n- Before finalizing, review your answer for clarity, completeness, and alignment with user expectations.\n- If you see a recent <VERIFICATION_PLAN> from a tool call, you MUST follow it step by step before giving your final response.\n\nRemember: Your responsibility is to provide the most helpful, well-reasoned, and accurate responses possible. Use tools iteratively and reflectively—don't hesitate to dig deeper or double-check when it improves response quality!",
121
+ AI_TOOLS: "You can execute tools in a loop agentically - you will have a chance to evaluate every tool response before deciding what action to take next - there is no time or execution limit. You have access to a powerful set of tools to help accomplish tasks and deliver the best responses. Instructions for tool use:\n\n1. Always dig deep, verify and cross-check:\n- Take your time and use tools as many times as needed to ensure truth, accuracy, depth, and completeness.\n- Gather data from multiple sources when possible.\n- Leverage both parallel and sequential tool calls for thorough investigation: start broadly, then dive deeper on leads, cross-check facts, and synthesize findings before responding.\n\n2. Plan carefully:\n- Carefully review all available tools before responding.\n- For complex or investigative tasks, use the planning tool first to break the goal into clear steps.\n- Select the most appropriate tool(s) for each step—think beyond single searches to multi-pass, multi-source discovery.\n\n3. Always dive deeper and use as many of your tools as apply:\n- Proactively use tools to refine, verify, and expand on initial findings—don’t settle for the first result if more depth or confirmation may help.\n- Always verify tool capabilities before concluding something can’t be done.\n- If a user explicitly requests tool usage, comply.\n- Maintain context across tool calls to ensure continuity and coherence.\n- If a tool fails, try alternatives or creative approaches.\n\n4. Common Use Cases:\n- Research: Explore multiple sources and perspectives to build a complete picture.\n- Analysis: Use tools to process, compare, and critically assess data or content.\n- Generation: Employ tools for creating content, visuals, or code as needed.\n- Verification: Prioritize cross-checking and fact validation, especially for claims or evolving news.\n\n5. Reflect and Personalize:\n- Synthesize findings into concise, relevant, and personalized responses.\n- If user preferences or past feedback are available, tailor responses accordingly.\n- Before finalizing, review your answer for clarity, completeness, and alignment with user expectations.\n- If you see a recent <VERIFICATION_PLAN> from a tool call, you MUST follow it step by step before giving your final response.\n\nRemember: Your responsibility is to provide the most helpful, well-reasoned, and accurate responses possible. Use tools iteratively and reflectively—don't hesitate to dig deeper or double-check when it improves response quality!",
122
122
  AI_DIRECTIVES: `These are your directives and learned behaviors:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>`,
123
123
  AI_CONVERSATION_HISTORY: "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}}\n</CONVERSATION_HISTORY>",
124
124
  AI_COMMON_INSTRUCTIONS: "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}",
@@ -478,6 +478,16 @@ var config = convict({
478
478
  default: null,
479
479
  env: 'NEURALSPACE_API_KEY'
480
480
  },
481
+ browserServiceUrl: {
482
+ format: String,
483
+ default: null,
484
+ env: 'CORTEX_BROWSER_URL'
485
+ },
486
+ jinaApiKey: {
487
+ format: String,
488
+ default: null,
489
+ env: 'JINA_API_KEY'
490
+ }
481
491
  });
482
492
 
483
493
  // Read in environment variables and set up service configuration
@@ -44,6 +44,7 @@ const callPathway = async (pathwayName, inArgs, pathwayResolver) => {
44
44
 
45
45
  const callTool = async (toolName, args, toolDefinitions, pathwayResolver) => {
46
46
  let toolResult = null;
47
+ let toolImages = [];
47
48
 
48
49
  const toolDef = toolDefinitions[toolName.toLowerCase()];
49
50
  if (!toolDef) {
@@ -77,26 +78,36 @@ const callTool = async (toolName, args, toolDefinitions, pathwayResolver) => {
77
78
  }
78
79
 
79
80
  // Handle search results accumulation
81
+ let parsedResult = null;
82
+
83
+ // Parse the result if it's a string
84
+ try {
85
+ parsedResult = typeof toolResult === 'string' ? JSON.parse(toolResult) : toolResult;
86
+ } catch (e) {
87
+ // If parsing fails, just return the original result
88
+ return {
89
+ result: toolResult,
90
+ toolImages: toolImages
91
+ };
92
+ }
93
+
80
94
  if (pathwayResolver) {
81
95
  // Initialize searchResults array if it doesn't exist
82
96
  if (!pathwayResolver.searchResults) {
83
97
  pathwayResolver.searchResults = [];
84
98
  }
85
99
 
86
- // Parse the result if it's a string
87
- let parsedResult;
88
- try {
89
- parsedResult = typeof toolResult === 'string' ? JSON.parse(toolResult) : toolResult;
90
- } catch (e) {
91
- // If parsing fails, just return the original result
92
- return toolResult;
93
- }
94
-
95
100
  // Check if this is a search response
96
101
  if (parsedResult._type === "SearchResponse" && Array.isArray(parsedResult.value)) {
97
102
  // Extract and add each search result
98
103
  parsedResult.value.forEach(result => {
99
104
  if (result.searchResultId) {
105
+ // Extract screenshot if present
106
+ if (result.screenshot) {
107
+ toolImages.push(result.screenshot);
108
+ delete result.screenshot;
109
+ }
110
+
100
111
  // Build content by concatenating headers and chunk if available
101
112
  let content = '';
102
113
  if (result.header_1) content += result.header_1 + '\n\n';
@@ -125,7 +136,10 @@ const callTool = async (toolName, args, toolDefinitions, pathwayResolver) => {
125
136
  }
126
137
  }
127
138
 
128
- return toolResult;
139
+ return {
140
+ result: parsedResult,
141
+ toolImages: toolImages
142
+ };
129
143
  } catch (error) {
130
144
  logger.error(`Error calling tool ${toolName}: ${error.message}`);
131
145
  return { error: error.message };
@@ -345,7 +345,7 @@ const makeRequest = async (cortexRequest) => {
345
345
 
346
346
  logger.info(`>>> [${requestId}] retrying request (${duration}ms) due to ${status} response. Retry count: ${i + 1}`);
347
347
  if (i < MAX_RETRY - 1) {
348
- const backoffTime = 200 * Math.pow(2, i);
348
+ const backoffTime = 1000 * Math.pow(2, i);
349
349
  const jitter = backoffTime * 0.2 * Math.random();
350
350
  await new Promise(r => setTimeout(r, backoffTime + jitter));
351
351
  } else {
@@ -359,27 +359,50 @@ const makeRequest = async (cortexRequest) => {
359
359
  };
360
360
 
361
361
  const executeRequest = async (cortexRequest) => {
362
- const { response, duration } = await makeRequest(cortexRequest);
363
- const requestId = cortexRequest.requestId;
364
- const { data, cached } = response;
362
+ try {
363
+ const result = await makeRequest(cortexRequest);
364
+
365
+ // Validate that we have a result and it contains response
366
+ if (!result || !result.response) {
367
+ throw new Error('No response received from request');
368
+ }
365
369
 
366
- if (cached) {
367
- logger.info(`<<< [${requestId}] served with cached response.`);
368
- }
370
+ const { response, duration } = result;
371
+ const requestId = cortexRequest.requestId;
369
372
 
370
- // Check for error object in the response
371
- if (response.error) {
372
- throw new Error(response.error.message || JSON.stringify(response.error));
373
- }
373
+ // Validate response object
374
+ if (!response || typeof response !== 'object') {
375
+ throw new Error('Invalid response object received');
376
+ }
374
377
 
375
- // Check for HTTP error status
376
- if (response.status >= 400) {
377
- const errorMessage = response.data?.error?.message || response.statusText;
378
- const errorDetails = response.data ? `\nResponse data: ${JSON.stringify(response.data)}` : '';
379
- throw new Error(`HTTP error: ${response.status} ${errorMessage}${errorDetails}`);
380
- }
378
+ const { data, cached } = response;
379
+
380
+ if (cached) {
381
+ logger.info(`<<< [${requestId}] served with cached response.`);
382
+ }
383
+
384
+ // Check for error object in the response
385
+ if (response.error) {
386
+ throw new Error(response.error.message || JSON.stringify(response.error));
387
+ }
381
388
 
382
- return { data, duration };
389
+ // Check for HTTP error status
390
+ if (response.status >= 400) {
391
+ const errorMessage = response.data?.error?.message || response.statusText;
392
+ const errorDetails = response.data ? `\nResponse data: ${JSON.stringify(response.data)}` : '';
393
+ throw new Error(`HTTP error: ${response.status} ${errorMessage}${errorDetails}`);
394
+ }
395
+
396
+ return { data, duration };
397
+ } catch (error) {
398
+ // Add context to the error
399
+ const requestId = cortexRequest?.requestId || 'unknown';
400
+ const model = cortexRequest?.model?.name || 'unknown';
401
+ const errorMessage = error.message || 'Unknown error occurred';
402
+
403
+ logger.error(`Error in executeRequest for ${model} (requestId: ${requestId}): ${errorMessage}`);
404
+ throw error;
405
+ }
383
406
  }
384
407
 
385
408
  export {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.3.42",
3
+ "version": "1.3.44",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -91,7 +91,7 @@ export default {
91
91
  });
92
92
 
93
93
  // Add the tool result to the isolated message history
94
- let toolResultContent = typeof toolResult === 'string' ? toolResult : JSON.stringify(toolResult);
94
+ const toolResultContent = typeof toolResult === 'string' ? toolResult : JSON.stringify(toolResult?.result || toolResult);
95
95
 
96
96
  toolMessages.push({
97
97
  role: "tool",
@@ -100,6 +100,25 @@ export default {
100
100
  content: toolResultContent
101
101
  });
102
102
 
103
+ // Add the screenshots using OpenAI image format
104
+ if (toolResult?.toolImages && toolResult.toolImages.length > 0) {
105
+ toolMessages.push({
106
+ role: "user",
107
+ content: [
108
+ {
109
+ type: "text",
110
+ text: "The tool with id " + toolCall.id + " has also supplied you with these images."
111
+ },
112
+ ...toolResult.toolImages.map(toolImage => ({
113
+ type: "image_url",
114
+ image_url: {
115
+ url: `data:image/png;base64,${toolImage}`
116
+ }
117
+ }))
118
+ ]
119
+ });
120
+ }
121
+
103
122
  return {
104
123
  success: true,
105
124
  result: toolResult,
@@ -0,0 +1,72 @@
1
+ // sys_tool_browser.js
2
+ // Tool pathway that handles web page content scraping functionality
3
+ import logger from '../../../../lib/logger.js';
4
+ import { config } from '../../../../config.js';
5
+ import { getSearchResultId } from '../../../../lib/util.js';
6
+
7
+ export default {
8
+ prompt: [],
9
+ timeout: 300,
10
+ toolDefinition: {
11
+ type: "function",
12
+ icon: "🌍",
13
+ function: {
14
+ name: "WebPageContent",
15
+ description: "This tool allows you to fetch and extract the text content and a screenshot if requested from any webpage. Use this when you need to analyze or understand the content of a specific webpage.",
16
+ parameters: {
17
+ type: "object",
18
+ properties: {
19
+ url: {
20
+ type: "string",
21
+ description: "The complete URL of the webpage to fetch and analyze"
22
+ },
23
+ takeScreenshot: {
24
+ type: "boolean",
25
+ description: "Whether to include a screenshot of the webpage in the response - slower, but can be helpful for digging deeper if the text content is not enough to answer the question"
26
+ },
27
+ userMessage: {
28
+ type: "string",
29
+ description: "A user-friendly message that describes what you're doing with this tool"
30
+ }
31
+ },
32
+ required: ["url", "userMessage"]
33
+ }
34
+ }
35
+ },
36
+
37
+ executePathway: async ({args, runAllPrompts, resolver}) => {
38
+ // Check if browser service URL is available
39
+ const browserServiceUrl = config.get('browserServiceUrl');
40
+ if (!browserServiceUrl) {
41
+ throw new Error("Browser service is not available - missing CORTEX_BROWSER_URL configuration");
42
+ }
43
+
44
+ try {
45
+ // Construct the full URL for the browser service
46
+ const scrapeUrl = `${browserServiceUrl}/api/scrape?url=${encodeURIComponent(args.url)}`;
47
+
48
+ // Call the browser service
49
+ const response = await fetch(scrapeUrl);
50
+ if (!response.ok) {
51
+ throw new Error(`Browser service returned error: ${response.status} ${response.statusText}`);
52
+ }
53
+
54
+ const data = await response.json();
55
+
56
+ // Create a result object with the scraped content
57
+ const result = {
58
+ searchResultId: getSearchResultId(),
59
+ title: "Webpage Content",
60
+ url: data.url,
61
+ content: data.text,
62
+ screenshot: args.takeScreenshot ? data.screenshot_base64 : undefined
63
+ };
64
+
65
+ resolver.tool = JSON.stringify({ toolUsed: "WebPageContent" });
66
+ return JSON.stringify({ _type: "SearchResponse", value: [result] });
67
+ } catch (e) {
68
+ logger.error(`Error in browser tool: ${e}`);
69
+ throw e;
70
+ }
71
+ }
72
+ };
@@ -0,0 +1,71 @@
1
+ // sys_tool_browser.js
2
+ // Tool pathway that handles web page content scraping functionality
3
+ import logger from '../../../../lib/logger.js';
4
+ import { config } from '../../../../config.js';
5
+ import { getSearchResultId } from '../../../../lib/util.js';
6
+
7
+ export default {
8
+ prompt: [],
9
+ timeout: 300,
10
+ toolDefinition: {
11
+ type: "function",
12
+ icon: "🌎",
13
+ function: {
14
+ name: "WebPageContentJina",
15
+ description: "This tool allows you to fetch and extract the text content from any webpage using the Jina API. This is a great fallback for web page content if you don't get a good enough response from your other browser tool.",
16
+ parameters: {
17
+ type: "object",
18
+ properties: {
19
+ url: {
20
+ type: "string",
21
+ description: "The complete URL of the webpage to fetch and analyze"
22
+ },
23
+ userMessage: {
24
+ type: "string",
25
+ description: "A user-friendly message that describes what you're doing with this tool"
26
+ }
27
+ },
28
+ required: ["url", "userMessage"]
29
+ }
30
+ }
31
+ },
32
+
33
+ executePathway: async ({args, runAllPrompts, resolver}) => {
34
+ // Check if browser service URL is available
35
+ const jinaApiKey = config.get('jinaApiKey');
36
+ if (!jinaApiKey) {
37
+ throw new Error("Jina API key is not available - missing JINA_API_KEY configuration");
38
+ }
39
+
40
+ try {
41
+ const scrapeUrl = `https://r.jina.ai/${encodeURIComponent(args.url)}`;
42
+ const token = `Bearer ${jinaApiKey}`;
43
+
44
+ const response = await fetch(scrapeUrl, {
45
+ headers: {
46
+ 'Authorization': token
47
+ }
48
+ });
49
+
50
+ if (!response.ok) {
51
+ throw new Error(`Browser service returned error: ${response.status} ${response.statusText}`);
52
+ }
53
+
54
+ const data = await response.text();
55
+
56
+ // Create a result object with the scraped content
57
+ const result = {
58
+ searchResultId: getSearchResultId(),
59
+ title: "Webpage Content",
60
+ url: args.url,
61
+ content: data
62
+ };
63
+
64
+ resolver.tool = JSON.stringify({ toolUsed: "WebPageContent" });
65
+ return JSON.stringify({ _type: "SearchResponse", value: [result] });
66
+ } catch (e) {
67
+ logger.error(`Error in browser tool: ${e}`);
68
+ throw e;
69
+ }
70
+ }
71
+ };
@@ -549,8 +549,20 @@ class ModelPlugin {
549
549
  cortexRequest.cache = config.get('enableCache') && (pathway.enableCache || pathway.temperature == 0);
550
550
  this.logRequestStart();
551
551
 
552
- const { data: responseData, duration: requestDuration } = await executeRequest(cortexRequest);
552
+ const response = await executeRequest(cortexRequest);
553
553
 
554
+ // Add null check and default values for response
555
+ if (!response) {
556
+ throw new Error('Request failed - no response received');
557
+ }
558
+
559
+ const { data: responseData, duration: requestDuration } = response;
560
+
561
+ // Validate response data
562
+ if (!responseData) {
563
+ throw new Error('Request failed - no data in response');
564
+ }
565
+
554
566
  const errorData = Array.isArray(responseData) ? responseData[0] : responseData;
555
567
  if (errorData && errorData.error) {
556
568
  const newError = new Error(errorData.error.message);
@@ -558,21 +570,32 @@ class ModelPlugin {
558
570
  throw newError;
559
571
  }
560
572
 
561
- this.logAIRequestFinished(requestDuration);
573
+ this.logAIRequestFinished(requestDuration || 0);
562
574
  const parsedData = this.parseResponse(responseData);
563
575
  this.logRequestData(data, parsedData, prompt);
564
576
 
565
577
  return parsedData;
566
578
  } catch (error) {
567
- // Log the error and continue
579
+ // Enhanced error logging
568
580
  const errorMessage = error?.response?.data?.message
569
581
  ?? error?.response?.data?.error?.message
570
582
  ?? error?.message
571
- ?? String(error); // Fallback to string representation
583
+ ?? String(error);
584
+
585
+ // Log the full error details for debugging
572
586
  logger.error(`Error in executeRequest for ${this.pathwayName}: ${errorMessage}`);
587
+ if (error.response) {
588
+ logger.error(`Response status: ${error.response.status}`);
589
+ logger.error(`Response headers: ${JSON.stringify(error.response.headers)}`);
590
+ }
573
591
  if (error.data) {
574
592
  logger.error(`Additional error data: ${JSON.stringify(error.data)}`);
575
593
  }
594
+ if (error.stack) {
595
+ logger.error(`Error stack: ${error.stack}`);
596
+ }
597
+
598
+ // Throw a more informative error
576
599
  throw new Error(`Execution failed for ${this.pathwayName}: ${errorMessage}`);
577
600
  }
578
601
  }
@@ -44,7 +44,7 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
44
44
  if (typeof parsedItem === 'object' && parsedItem !== null) {
45
45
  // Handle both 'image' and 'image_url' types
46
46
  if (parsedItem.type === 'image' || parsedItem.type === 'image_url') {
47
- const url = parsedItem.image_url?.url || parsedItem.url;
47
+ const url = parsedItem.url || parsedItem.image_url?.url;
48
48
  if (url && await this.validateImageUrl(url)) {
49
49
  return { type: 'image_url', image_url: { url } };
50
50
  }