@aj-archipelago/cortex 1.3.40 → 1.3.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/config.js CHANGED
@@ -118,7 +118,7 @@ var config = convict({
118
118
  default: {
119
119
  AI_MEMORY: `<SHORT_TERM_MEMORY>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</SHORT_TERM_MEMORY>`,
120
120
  AI_MEMORY_INSTRUCTIONS: "You have persistent memories of important details, instructions, and context - consult your memories when formulating a response to make sure you're applying your learnings.\nIf you don't see relevant information in your short term memory, you should use your SearchMemory tool to search your long term memory for details.\nAlso included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.",
121
- AI_TOOLS: "You have access to a powerful set of tools that you can use to help accomplish tasks and provide better responses. Here's how to use them effectively:\n\n1. Take your time and use tools as many times as you need to be sure you have all the information to make a good response. In many cases you will want to make multiple tool calls. You can call multiple tools in parallel or you can chain them, waiting for the results of one for information before you call another. This allows you to dig deeper, compile more information, read various sources, and even double check and verify your information before responding.\n\n2. Tool Selection and Planning:\n- Carefully review your available tools before responding\n- For complex multi-step tasks, use your planning tool first to create a step-by-step plan to achieve the goal using the tools at your disposal\n- Consider which tools would be most appropriate for each step\n\n3. Best Practices:\n- Always verify tool capabilities before telling users something can't be done\n- Use tools proactively when they can provide better or more accurate information\n- If a user explicitly requests tool usage, you must comply\n- When using multiple tools, maintain context between tool calls\n- If a tool fails, consider alternative approaches or tools\n\n4. Common Use Cases:\n- Research: Use search tools across multiple sources to gather information before responding\n- Analysis: Use tools to process and analyze data or content\n- Generation: Use appropriate tools for creating content, images, or code\n- Verification: Use tools to validate information or check facts\n\nRemember: Your goal is to provide the most helpful and accurate responses possible. Don't hesitate to use tools when they can improve your response quality or accuracy.",
121
+ AI_TOOLS: "You can execute tools in a loop agentically - you will have a chance to evaluate every tool response before deciding what action to take next - there is no time or execution limit. You have access to a powerful set of tools to help accomplish tasks and deliver the best responses. Instructions for tool use:\n\n1. Always dig deep, verify and cross-check:\n- Take your time and use tools as many times as needed to ensure truth, accuracy, depth, and completeness.\n- Leverage both parallel and sequential tool calls for thorough investigation: start broadly, then dive deeper on leads, cross-check facts, and synthesize findings before responding.\n\n2. Plan carefully:\n- Carefully review all available tools before responding.\n- For complex or investigative tasks, use the planning tool first to break the goal into clear steps.\n- Select the most appropriate tool(s) for each step—think beyond single searches to multi-pass, multi-source discovery.\n\n3. Always dive deeper and use as many of your tools as apply:\n- Proactively use tools to refine, verify, and expand on initial findings—don't settle for the first result if more depth or confirmation may help.\n- Always verify tool capabilities before concluding something can't be done.\n- If a user explicitly requests tool usage, comply.\n- Maintain context across tool calls to ensure continuity and coherence.\n- If a tool fails, try alternatives or creative approaches.\n\n4. Common Use Cases:\n- Research: Explore multiple sources and perspectives to build a complete picture.\n- Analysis: Use tools to process, compare, and critically assess data or content.\n- Generation: Employ tools for creating content, visuals, or code as needed.\n- Verification: Prioritize cross-checking and fact validation, especially for claims or evolving news.\n\n5. Reflect and Personalize:\n- Synthesize findings into concise, relevant, and personalized responses.\n- If user preferences or past feedback are available, tailor responses accordingly.\n- Before finalizing, review your answer for clarity, completeness, and alignment with user expectations.\n- If you see a recent <VERIFICATION_PLAN> from a tool call, you MUST follow it step by step before giving your final response.\n\nRemember: Your responsibility is to provide the most helpful, well-reasoned, and accurate responses possible. Use tools iteratively and reflectively—don't hesitate to dig deeper or double-check when it improves response quality!",
122
122
  AI_DIRECTIVES: `These are your directives and learned behaviors:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>`,
123
123
  AI_CONVERSATION_HISTORY: "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}}\n</CONVERSATION_HISTORY>",
124
124
  AI_COMMON_INSTRUCTIONS: "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}",
@@ -207,7 +207,7 @@ const requestWithMonitor = async (endpoint, url, data, axiosConfigObj) => {
207
207
  return { response, duration };
208
208
  }
209
209
 
210
- const MAX_RETRY = 10; // retries for error handling
210
+ const MAX_RETRY = 5; // retries for error handling
211
211
  const MAX_DUPLICATE_REQUESTS = 3; // duplicate requests to manage latency spikes
212
212
  const DUPLICATE_REQUEST_AFTER = 10; // 10 seconds
213
213
 
@@ -318,6 +318,7 @@ const makeRequest = async (cortexRequest) => {
318
318
  if (cortexRequest.model.endpoints.length === 1) {
319
319
  if (status !== 429 &&
320
320
  status !== 408 &&
321
+ status !== 500 &&
321
322
  status !== 502 &&
322
323
  status !== 503 &&
323
324
  status !== 504) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.3.40",
3
+ "version": "1.3.41",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -20,7 +20,7 @@ export default {
20
20
  title: '',
21
21
  text: '',
22
22
  },
23
- model: 'oai-gpt41-nano',
23
+ model: 'oai-gpt41-mini',
24
24
  useInputChunking: false,
25
25
  temperature: 0,
26
26
  enableDuplicateRequests: false
@@ -66,10 +66,15 @@ export default {
66
66
  const messageWithIcon = toolIcon ? `${toolIcon}&nbsp;&nbsp;${toolUserMessage}` : toolUserMessage;
67
67
  await say(pathwayResolver.rootRequestId || pathwayResolver.requestId, `${messageWithIcon}\n\n`, 1000, false);
68
68
 
69
- if (toolArgs.detailedInstructions) {
70
- toolMessages.push({role: "user", content: toolArgs.detailedInstructions});
71
- }
69
+ const toolResult = await callTool(toolFunction, {
70
+ ...args,
71
+ ...toolArgs,
72
+ toolFunction,
73
+ chatHistory: toolMessages,
74
+ stream: false
75
+ }, entityTools, pathwayResolver);
72
76
 
77
+ // Tool calls and results need to be paired together in the message history
73
78
  // Add the tool call to the isolated message history
74
79
  toolMessages.push({
75
80
  role: "assistant",
@@ -84,14 +89,6 @@ export default {
84
89
  }]
85
90
  });
86
91
 
87
- const toolResult = await callTool(toolFunction, {
88
- ...args,
89
- ...toolArgs,
90
- toolFunction,
91
- chatHistory: toolMessages,
92
- stream: false
93
- }, entityTools, pathwayResolver);
94
-
95
92
  // Add the tool result to the isolated message history
96
93
  let toolResultContent = typeof toolResult === 'string' ? toolResult : JSON.stringify(toolResult);
97
94
 
@@ -6,6 +6,7 @@ import { getAvailableEntities } from './tools/shared/sys_entity_tools.js';
6
6
  export default {
7
7
  prompt: [],
8
8
  inputParameters: {},
9
+ model: 'oai-gpt41-mini',
9
10
  executePathway: async ({ args }) => {
10
11
  try {
11
12
  const entities = getAvailableEntities();
@@ -34,7 +34,7 @@ export default {
34
34
  },
35
35
  userPrompt: {
36
36
  type: "string",
37
- description: "The complete prompt to send as a user message to the model instructing the model to perform the task you need."
37
+ description: "The complete prompt to send as a user message to the model instructing the model to perform the task you need. Keep in mind this model does not share your context, conversation history, tool call results, or memories - so include all relevant information in the user prompt."
38
38
  },
39
39
  model: {
40
40
  type: "string",
@@ -8,7 +8,7 @@ export default {
8
8
  [
9
9
  new Prompt({ messages: [
10
10
  {"role": "system", "content": `You are the part of an AI entity named {{aiName}} that provides advanced coding and programming capabilities. You excel at writing, reviewing, and explaining code across various programming languages. You can help with code generation, debugging, optimization, and best practices. Think carefully about the latest request and provide a detailed, well thought out, carefully reviewed response.\n{{renderTemplate AI_DATETIME}}`},
11
- "{{chatHistory}}",
11
+ "{{chatHistory}}"
12
12
  ]}),
13
13
  ],
14
14
  inputParameters: {
@@ -46,6 +46,9 @@ export default {
46
46
  }],
47
47
 
48
48
  executePathway: async ({args, runAllPrompts, resolver}) => {
49
+ if (args.detailedInstructions) {
50
+ args.chatHistory.push({role: "user", content: args.detailedInstructions});
51
+ }
49
52
  let result = await runAllPrompts({ ...args, stream: false });
50
53
  resolver.tool = JSON.stringify({ toolUsed: "coding" });
51
54
  return result;
@@ -48,13 +48,13 @@ export default {
48
48
  icon: "🤖",
49
49
  function: {
50
50
  name: "CodeExecution",
51
- description: "Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks.",
51
+ description: "Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks - examples include data analysis, file manipulation, or other tasks that require code execution.",
52
52
  parameters: {
53
53
  type: "object",
54
54
  properties: {
55
55
  codingTask: {
56
56
  type: "string",
57
- description: "Detailed task description for the coding agent. Include all necessary information as this is the only message the coding agent receives. Let the agent decide how to solve it without making assumptions about its capabilities."
57
+ description: "Detailed task description for the coding agent. Include all necessary information as this is the only message the coding agent receives. Let the agent decide how to solve it without making assumptions about its capabilities. IMPORTANT: The coding agent does not share your context, so you must provide it with all the information in this message. If you are asking it to operate on files or other data from your context, you must provide the fully-qualified URL to each of the files you want it to use. Also make sure you explicitly instruct the agent to use those files."
58
58
  },
59
59
  userMessage: {
60
60
  type: "string",
@@ -24,7 +24,7 @@ export default {
24
24
  icon: "📂",
25
25
  function: {
26
26
  name: "SearchPersonal",
27
- description: "Search through the user's personal documents and uploaded files. Use this for finding information in user-provided content.",
27
+ description: "Search through the user's index of personal documents and indexed uploaded files. Use this for finding information in user-provided content or if the user refers to a file or a document that you don't see elsewhere in your context.",
28
28
  parameters: {
29
29
  type: "object",
30
30
  properties: {
@@ -55,9 +55,9 @@ export default {
55
55
  },
56
56
  {
57
57
  type: "function",
58
+ icon: "📰",
58
59
  function: {
59
60
  name: "SearchAJA",
60
- icon: "📰",
61
61
  description: "Search through Al Jazeera Arabic news articles. Use this for finding Arabic news content.",
62
62
  parameters: {
63
63
  type: "object",
@@ -42,7 +42,7 @@ export default {
42
42
 
43
43
  try {
44
44
  let model = "replicate-flux-11-pro";
45
- let prompt = args.detailedInstructions;
45
+ let prompt = args.detailedInstructions || "";
46
46
  let numberResults = args.numberResults || 1;
47
47
  let negativePrompt = args.negativePrompt || "";
48
48
 
@@ -112,6 +112,9 @@ export default {
112
112
  }],
113
113
 
114
114
  executePathway: async ({args, runAllPrompts, resolver}) => {
115
+ if (args.detailedInstructions) {
116
+ args.chatHistory.push({role: "user", content: args.detailedInstructions});
117
+ }
115
118
  const result = await runAllPrompts({ ...args });
116
119
  resolver.tool = JSON.stringify({ toolUsed: "vision" });
117
120
  return result;
@@ -49,7 +49,7 @@ export default {
49
49
  icon: "🧠",
50
50
  function: {
51
51
  name: "Reason",
52
- description: "Employ for advancedreasoning, scientific analysis, evaluating evidence, strategic planning, problem-solving, logic puzzles, mathematical calculations, or any questions that require careful thought or complex choices.",
52
+ description: "Employ for advanced reasoning, scientific analysis, evaluating evidence, strategic planning, problem-solving, logic puzzles, mathematical calculations, or any questions that require careful thought or complex choices.",
53
53
  parameters: {
54
54
  type: "object",
55
55
  properties: {
@@ -68,6 +68,9 @@ export default {
68
68
  }],
69
69
 
70
70
  executePathway: async ({args, runAllPrompts, resolver}) => {
71
+ if (args.detailedInstructions) {
72
+ args.chatHistory.push({role: "user", content: args.detailedInstructions});
73
+ }
71
74
  let result = await runAllPrompts({ ...args, stream: false });
72
75
  resolver.tool = JSON.stringify({ toolUsed: "reasoning" });
73
76
  return result;
@@ -53,6 +53,9 @@ export default {
53
53
  }],
54
54
 
55
55
  executePathway: async ({args, runAllPrompts, resolver}) => {
56
+ if (args.detailedInstructions) {
57
+ args.chatHistory.push({role: "user", content: args.detailedInstructions});
58
+ }
56
59
  resolver.tool = JSON.stringify({ toolUsed: "memory" });
57
60
  return await callPathway('sys_search_memory', { ...args, stream: false, section: 'memoryAll', updateContext: true });
58
61
  }
@@ -0,0 +1,63 @@
1
+ // sys_tool_verify.js
2
+ // Entity tool that provides response verification and critical evaluation capabilities
3
+
4
+ import { Prompt } from '../../../../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ {"role": "system", "content": `You are the part of an AI entity named {{aiName}} that provides critical evaluation and verification capabilities. You excel at analyzing responses for accuracy, completeness, and potential issues. You do not have the tools to do the verification yourself, but you can help identify gaps, inconsistencies, questions, and areas that need further verification. Think carefully about the response and provide a detailed plan for verification including questions and follow-up actions to ensure the response is robust and reliable. Always include a section called <VERIFICATION_PLAN> that outlines step by step actions to take to answer questions or further research the response.\n{{renderTemplate AI_DATETIME}}`},
11
+ "{{chatHistory}}",
12
+ {"role": "user", "content": `Please provide a {{verificationLevel}} review of the following draft response:\n\n<DRAFT_RESPONSE>\n{{responseText}}\n</DRAFT_RESPONSE>\n\n{{context}}`}
13
+ ]}),
14
+ ],
15
+ inputParameters: {
16
+ chatHistory: [{role: '', content: []}],
17
+ contextId: ``,
18
+ aiName: "Jarvis",
19
+ language: "English",
20
+ model: 'oai-o3'
21
+ },
22
+ max_tokens: 100000,
23
+ useInputChunking: false,
24
+ enableDuplicateRequests: false,
25
+ timeout: 600,
26
+ toolDefinition: [{
27
+ type: "function",
28
+ icon: "🔍",
29
+ function: {
30
+ name: "Verify",
31
+ description: "Critically evaluate a response and provide verification questions and follow-up actions. You should call this tool and execute the steps provided in the resulting verification plan before presenting a final response to the user to ensure it is accurate, complete, and consistent with the user's request.",
32
+ parameters: {
33
+ type: "object",
34
+ properties: {
35
+ responseText: {
36
+ type: "string",
37
+ description: "The draft response text to be verified"
38
+ },
39
+ context: {
40
+ type: "string",
41
+ description: "Additional context or instructions about the verification request"
42
+ },
43
+ verificationLevel: {
44
+ type: "string",
45
+ description: "The level of verification required (basic, thorough, or critical)",
46
+ enum: ["basic", "thorough", "critical"]
47
+ },
48
+ userMessage: {
49
+ type: "string",
50
+ description: "A user-friendly message that describes what you're doing with this tool"
51
+ }
52
+ },
53
+ required: ["responseText", "verificationLevel", "userMessage"]
54
+ }
55
+ }
56
+ }],
57
+
58
+ executePathway: async ({args, runAllPrompts, resolver}) => {
59
+ let result = await runAllPrompts({ ...args, stream: false });
60
+ resolver.tool = JSON.stringify({ toolUsed: "verification" });
61
+ return result;
62
+ }
63
+ }
@@ -75,7 +75,7 @@ class AzureBingPlugin extends ModelPlugin {
75
75
 
76
76
  // Override the logging function to display the request and response
77
77
  logRequestData(data, responseData, prompt) {
78
- this.logAIRequestFinished();
78
+ //this.logAIRequestFinished();
79
79
 
80
80
  logger.verbose(`${this.parseResponse(responseData)}`);
81
81
 
@@ -8,12 +8,14 @@ class OpenAIReasoningPlugin extends OpenAIChatPlugin {
8
8
  for (const message of messages) {
9
9
  if (message.role === 'user' || message.role === 'assistant') {
10
10
  newMessages.push({
11
+ ...message,
11
12
  role: message.role,
12
13
  content: this.parseContent(message.content)
13
14
  });
14
15
  } else if (message.role === 'system') {
15
16
  // System messages to developer: https://platform.openai.com/docs/guides/text-generation#messages-and-roles
16
17
  newMessages.push({
18
+ ...message,
17
19
  role: "developer",
18
20
  content: this.parseContent(message.content)
19
21
  });
@@ -7,9 +7,10 @@ class OpenAIReasoningVisionPlugin extends OpenAIVisionPlugin {
7
7
 
8
8
  let newMessages = [];
9
9
 
10
+ // System messages to developer: https://platform.openai.com/docs/guides/text-generation#messages-and-roles
10
11
  newMessages = parsedMessages.map(message => ({
11
- role: message.role === 'system' ? 'developer' : message.role,
12
- content: message.content
12
+ ...message,
13
+ role: message.role === 'system' ? 'developer' : message.role
13
14
  })).filter(message => ['user', 'assistant', 'developer', 'tool'].includes(message.role));
14
15
 
15
16
  return newMessages;
@@ -25,40 +25,33 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
25
25
  return await Promise.all(messages.map(async message => {
26
26
  try {
27
27
  // Handle tool-related message types
28
- if (message.role === "tool") {
28
+ if (message.role === "tool" || (message.role === "assistant" && message.tool_calls)) {
29
29
  return {
30
- role: message.role,
31
- content: message.content,
32
- tool_call_id: message.tool_call_id
33
- };
34
- }
35
-
36
- if (message.role === "assistant" && message.tool_calls) {
37
- return {
38
- role: message.role,
39
- content: message.content,
40
- tool_calls: message.tool_calls
30
+ ...message
41
31
  };
42
32
  }
43
33
 
44
34
  if (Array.isArray(message.content)) {
45
- message.content = await Promise.all(message.content.map(async item => {
46
- const parsedItem = safeJsonParse(item);
35
+ return {
36
+ ...message,
37
+ content: await Promise.all(message.content.map(async item => {
38
+ const parsedItem = safeJsonParse(item);
47
39
 
48
- if (typeof parsedItem === 'string') {
49
- return { type: 'text', text: parsedItem };
50
- }
40
+ if (typeof parsedItem === 'string') {
41
+ return { type: 'text', text: parsedItem };
42
+ }
51
43
 
52
- if (typeof parsedItem === 'object' && parsedItem !== null && parsedItem.type === 'image_url') {
53
- const url = parsedItem.url || parsedItem.image_url?.url;
54
- if (url && await this.validateImageUrl(url)) {
55
- return {type: parsedItem.type, image_url: {url}};
44
+ if (typeof parsedItem === 'object' && parsedItem !== null && parsedItem.type === 'image_url') {
45
+ const url = parsedItem.url || parsedItem.image_url?.url;
46
+ if (url && await this.validateImageUrl(url)) {
47
+ return {type: parsedItem.type, image_url: {url}};
48
+ }
49
+ return { type: 'text', text: typeof item === 'string' ? item : JSON.stringify(item) };
56
50
  }
57
- return { type: 'text', text: typeof item === 'string' ? item : JSON.stringify(item) };
58
- }
59
-
60
- return parsedItem;
61
- }));
51
+
52
+ return parsedItem;
53
+ }))
54
+ };
62
55
  }
63
56
  } catch (e) {
64
57
  return message;