@aj-archipelago/cortex 1.3.45 → 1.3.47

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
package/config.js CHANGED
@@ -117,16 +117,29 @@ var config = convict({
117
117
  format: Object,
118
118
  default: {
119
119
  AI_MEMORY: `<SHORT_TERM_MEMORY>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</SHORT_TERM_MEMORY>`,
120
- AI_MEMORY_INSTRUCTIONS: "You have persistent memories of important details, instructions, and context - consult your memories when formulating a response to make sure you're applying your learnings.\nIf you don't see relevant information in your short term memory, you should use your SearchMemory tool to search your long term memory for details.\nAlso included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.",
121
- AI_TOOLS: "You can execute tools in a loop agentically - you will have a chance to evaluate every tool response before deciding what action to take next - there is no time or execution limit. You have access to a powerful set of tools to help accomplish tasks and deliver the best responses. Instructions for tool use:\n\n1. Always dig deep, verify and cross-check:\n- Take your time and use tools as many times as needed to ensure truth, accuracy, depth, and completeness.\n- Gather data from multiple sources when possible.\n- Leverage both parallel and sequential tool calls for thorough investigation: start broadly, then dive deeper on leads, cross-check facts, and synthesize findings before responding.\n\n2. Plan carefully:\n- Carefully review all available tools before responding.\n- For complex or investigative tasks, use the planning tool first to break the goal into clear steps.\n- Select the most appropriate tool(s) for each step—think beyond single searches to multi-pass, multi-source discovery.\n\n3. Always dive deeper and use as many of your tools as apply:\n- Proactively use tools to refine, verify, and expand on initial findings—don’t settle for the first result if more depth or confirmation may help.\n- Always verify tool capabilities before concluding something can’t be done.\n- If a user explicitly requests tool usage, comply.\n- Maintain context across tool calls to ensure continuity and coherence.\n- If a tool fails, try alternatives or creative approaches.\n\n4. Common Use Cases:\n- Research: Explore multiple sources and perspectives to build a complete picture.\n- Analysis: Use tools to process, compare, and critically assess data or content.\n- Generation: Employ tools for creating content, visuals, or code as needed.\n- Verification: Prioritize cross-checking and fact validation, especially for claims or evolving news.\n\n5. Reflect and Personalize:\n- Synthesize findings into concise, relevant, and personalized responses.\n- If user preferences or past feedback are available, tailor responses accordingly.\n- Before finalizing, review your answer for clarity, completeness, and alignment with user expectations.\n- If you see a recent <VERIFICATION_PLAN> from a tool call, you MUST follow it step by step before giving your final response.\n\nRemember: Your responsibility is to provide the most helpful, well-reasoned, and accurate responses possible. Use tools iteratively and reflectively—don't hesitate to dig deeper or double-check when it improves response quality!",
120
+
121
+ AI_MEMORY_CONTEXT: `<CONTEXTUAL_MEMORIES>\n{{{memoryContext}}}\n</CONTEXTUAL_MEMORIES>`,
122
+
123
+ AI_MEMORY_INSTRUCTIONS: "You have persistent memories of important details, instructions, and context - consult your memories when formulating a response to make sure you're applying your learnings.\nIf you don't see relevant or sufficient information in your short term or contextual memories, you should use your SearchMemory tool to search your long term memory for details before answering.\nAlso included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.",
124
+
125
+ AI_TOOLS: "You have an extensive toolkit. Each time you call a tool you enter a loop: get the result, decide what’s next, and chain as many steps as needed.\n\n1. Search deeply & verify rigorously\n • Start broad and consult multiple sources, running searches in parallel when speed helps.\n • For high-stakes or time-sensitive topics, open and read full pages—never rely solely on snippets.\n • Cross-check facts across sources and always honor user requests to use tools.\n\n2. Plan & sequence before acting\n • Review the full toolset first.\n • For multi-step or complex tasks, draft a clear plan (use the Plan tool) and assign the best tool to each step.\n\n3. Escalate & iterate\n • Don’t accept the first plausible answer—dig until it’s complete, corroborated, and clear.\n • If a tool falls short, switch strategies or tools while preserving context.\n\n4. Core patterns of use\n a. Research – gather and compare information.\n b. Analysis – evaluate, calculate, summarize, or reason.\n c. Generation – create content, visuals, or code.\n d. Verification – fact-check and cite; if a <VERIFICATION_PLAN> is present in tool results, follow it before responding.\n\n5. Personalize, synthesize & review\n • Tailor the final answer to the user’s preferences and history.\n • Deliver a concise, well-structured response, citing sources with :cd_source[…].\n • Double-check accuracy, coherence, and alignment with the request.\n\nBottom line: Be thorough, strategic, and iterative. Read sources directly for high-stakes queries and aim for the most accurate, well-reasoned answer—even if it takes multiple tool calls.",
126
+
122
127
  AI_DIRECTIVES: `These are your directives and learned behaviors:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>`,
128
+
123
129
  AI_CONVERSATION_HISTORY: "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}}\n</CONVERSATION_HISTORY>",
130
+
124
131
  AI_COMMON_INSTRUCTIONS: "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}",
125
- AI_COMMON_INSTRUCTIONS_MARKDOWN: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.",
126
- AI_COMMON_INSTRUCTIONS_VOICE: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
127
- AI_DATETIME: "The current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT.",
132
+
133
+ AI_COMMON_INSTRUCTIONS_MARKDOWN: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as mermaid charts and some custom markdown elements, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.",
134
+
135
+ AI_COMMON_INSTRUCTIONS_VOICE: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
136
+
137
+ AI_DATETIME: "The current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT.",
138
+
128
139
  AI_EXPERTISE: "Your expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data and the ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, take screenshots, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment.",
129
- AI_GROUNDING_INSTRUCTIONS: "Grounding your response: Any time you base part or all of your response on one or more search results, you MUST cite the source using a custom markdown directive of the form :cd_source[searchResultId]. There is NO other valid way to cite a source and a good UX depends on you using this directive correctly. Do not include other clickable links to the sourcewhen using the :cd_source[searchResultId] directive. Every search result has a unique searchResultId. You must include it verbatim, copied directly from the search results. Place the directives at the end of the phrase, sentence or paragraph that is grounded in that particular search result. If you are citing multiple search results, use multiple individual:cd_source[searchResultId] directives (e.g. :cd_source[searchResultId1] :cd_source[searchResultId2] :cd_source[searchResultId3] etc.)",
140
+
141
+ AI_GROUNDING_INSTRUCTIONS: "Grounding your response: If you base part or all of your response on one or more search results, you MUST cite the source using a custom markdown directive of the form :cd_source[searchResultId]. There is NO other valid way to cite a source and a good UX depends on you using this directive correctly. Do not include other clickable links to the sourcewhen using the :cd_source[searchResultId] directive. Every search result has a unique searchResultId. You must include it verbatim, copied directly from the search results. Place the directives at the end of the phrase, sentence or paragraph that is grounded in that particular search result. If you are citing multiple search results, use multiple individual:cd_source[searchResultId] directives (e.g. :cd_source[searchResultId1] :cd_source[searchResultId2] :cd_source[searchResultId3] etc.)",
142
+
130
143
  AI_STYLE_OPENAI: "oai-gpt41",
131
144
  AI_STYLE_ANTHROPIC: "claude-35-sonnet-vertex",
132
145
  },
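The new AI_MEMORY_CONTEXT entry follows the same Handlebars-style template convention as the rest of this block: {{{memoryContext}}} is a triple-stash substitution, while renderTemplate and toJSON (used elsewhere in these defaults) appear to be custom helpers registered by Cortex's prompt layer. A minimal sketch of how such a template renders, using the standard handlebars package and a made-up memory value (an illustration, not the package's actual rendering code):

```js
// Minimal sketch using the handlebars package (values are made up).
// Cortex's prompt layer also registers custom helpers such as renderTemplate
// and toJSON; plain Handlebars is enough to show the substitution itself.
import Handlebars from "handlebars";

const AI_MEMORY_CONTEXT =
  "<CONTEXTUAL_MEMORIES>\n{{{memoryContext}}}\n</CONTEXTUAL_MEMORIES>";

const render = Handlebars.compile(AI_MEMORY_CONTEXT);

// Triple-stash ({{{ }}}) inserts the value without HTML-escaping it.
console.log(render({ memoryContext: "User prefers concise answers." }));
// <CONTEXTUAL_MEMORIES>
// User prefers concise answers.
// </CONTEXTUAL_MEMORIES>
```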
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.3.45",
3
+ "version": "1.3.47",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -1,14 +1,16 @@
1
1
  // sys_entity_agent.js
2
2
  // Agentic extension of the entity system that uses OpenAI's tool calling API
3
+ const MAX_TOOL_CALLS = 50;
4
+
3
5
  import { callPathway, callTool, say } from '../../../lib/pathwayTools.js';
4
6
  import logger from '../../../lib/logger.js';
5
7
  import { config } from '../../../config.js';
6
8
  import { chatArgsHasImageUrl, removeOldImageAndFileContent } from '../../../lib/util.js';
7
- import { insertToolCallAndResults } from './memory/shared/sys_memory_helpers.js';
8
9
  import { Prompt } from '../../../server/prompt.js';
9
10
  import { getToolsForEntity, loadEntityConfig } from './tools/shared/sys_entity_tools.js';
10
11
 
11
12
  export default {
13
+ emulateOpenAIChatModel: 'cortex-agent',
12
14
  useInputChunking: false,
13
15
  enableDuplicateRequests: false,
14
16
  useSingleTokenStream: false,
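The new MAX_TOOL_CALLS constant caps how many tool executions a single agent run may perform; the hunks below track the running total on pathwayResolver.toolCallCount and stop executing tools once the cap is reached. A minimal sketch of that guard pattern in isolation (runToolBatch and askModel are hypothetical stand-ins for the pathway's callTool/promptAndParse machinery):

```js
// Minimal sketch of the MAX_TOOL_CALLS guard used by this pathway.
// runToolBatch and askModel are hypothetical stand-ins; only the counter
// logic mirrors the diff below.
const MAX_TOOL_CALLS = 50;

async function agentTurn(resolver, toolCalls, runToolBatch, askModel) {
  resolver.toolCallCount = resolver.toolCallCount || 0;

  if (resolver.toolCallCount < MAX_TOOL_CALLS) {
    const results = await runToolBatch(toolCalls);   // execute this batch of tool calls
    resolver.toolCallCount += results.length;        // count what actually ran
    return askModel(results);                        // let the model continue the loop
  }

  // Cap reached: tell the model no more tools will run and ask for a final answer.
  return askModel([
    { role: "user", content: "This agent has reached the maximum number of tool calls - no more tool calls will be executed." },
  ]);
}
```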
@@ -40,153 +42,173 @@ export default {
40
42
  const { tool_calls } = message;
41
43
  const pathwayResolver = resolver;
42
44
  const { entityTools, entityToolsOpenAiFormat } = args;
45
+
46
+ pathwayResolver.toolCallCount = (pathwayResolver.toolCallCount || 0);
43
47
 
44
- // Make a deep copy of the initial chat history
45
- const initialMessages = JSON.parse(JSON.stringify(args.chatHistory || []));
48
+ const preToolCallMessages = JSON.parse(JSON.stringify(args.chatHistory || []));
49
+ const finalMessages = JSON.parse(JSON.stringify(preToolCallMessages));
46
50
 
47
51
  if (tool_calls) {
48
- // Execute tool calls in parallel but with isolated message histories
49
- const toolResults = await Promise.all(tool_calls.map(async (toolCall) => {
50
- try {
51
- if (!toolCall?.function?.arguments) {
52
- throw new Error('Invalid tool call structure: missing function arguments');
53
- }
52
+ if (pathwayResolver.toolCallCount < MAX_TOOL_CALLS) {
53
+ // Execute tool calls in parallel but with isolated message histories
54
+ const toolResults = await Promise.all(tool_calls.map(async (toolCall) => {
55
+ try {
56
+ if (!toolCall?.function?.arguments) {
57
+ throw new Error('Invalid tool call structure: missing function arguments');
58
+ }
59
+
60
+ const toolArgs = JSON.parse(toolCall.function.arguments);
61
+ const toolFunction = toolCall.function.name.toLowerCase();
62
+
63
+ // Create an isolated copy of messages for this tool
64
+ const toolMessages = JSON.parse(JSON.stringify(preToolCallMessages));
65
+
66
+ // Get the tool definition to check for icon
67
+ const toolDefinition = entityTools[toolFunction]?.definition;
68
+ const toolIcon = toolDefinition?.icon || '🛠️';
69
+
70
+ // Report status to the user
71
+ const toolUserMessage = toolArgs.userMessage || `Executing tool: ${toolCall.function.name} - ${JSON.stringify(toolArgs)}`;
72
+ const messageWithIcon = toolIcon ? `${toolIcon}&nbsp;&nbsp;${toolUserMessage}` : toolUserMessage;
73
+ await say(pathwayResolver.rootRequestId || pathwayResolver.requestId, `${messageWithIcon}\n\n`, 1000, false);
74
+
75
+ const toolResult = await callTool(toolFunction, {
76
+ ...args,
77
+ ...toolArgs,
78
+ toolFunction,
79
+ chatHistory: toolMessages,
80
+ stream: false
81
+ }, entityTools, pathwayResolver);
82
+
83
+ // Tool calls and results need to be paired together in the message history
84
+ // Add the tool call to the isolated message history
85
+ toolMessages.push({
86
+ role: "assistant",
87
+ content: "",
88
+ tool_calls: [{
89
+ id: toolCall.id,
90
+ type: "function",
91
+ function: {
92
+ name: toolCall.function.name,
93
+ arguments: JSON.stringify(toolArgs)
94
+ }
95
+ }]
96
+ });
97
+
98
+ // Add the tool result to the isolated message history
99
+ const toolResultContent = typeof toolResult === 'string' ? toolResult : JSON.stringify(toolResult?.result || toolResult);
54
100
 
55
- const toolArgs = JSON.parse(toolCall.function.arguments);
56
- const toolFunction = toolCall.function.name.toLowerCase();
57
-
58
- // Create an isolated copy of messages for this tool
59
- const toolMessages = JSON.parse(JSON.stringify(initialMessages));
60
-
61
- // Get the tool definition to check for icon
62
- const toolDefinition = entityTools[toolFunction]?.definition;
63
- const toolIcon = toolDefinition?.icon || '🛠️';
64
-
65
- // Report status to the user
66
- const toolUserMessage = toolArgs.userMessage || `Executing tool: ${toolCall.function.name} - ${JSON.stringify(toolArgs)}`;
67
- const messageWithIcon = toolIcon ? `${toolIcon}&nbsp;&nbsp;${toolUserMessage}` : toolUserMessage;
68
- await say(pathwayResolver.rootRequestId || pathwayResolver.requestId, `${messageWithIcon}\n\n`, 1000, false);
69
-
70
- const toolResult = await callTool(toolFunction, {
71
- ...args,
72
- ...toolArgs,
73
- toolFunction,
74
- chatHistory: toolMessages,
75
- stream: false
76
- }, entityTools, pathwayResolver);
77
-
78
- // Tool calls and results need to be paired together in the message history
79
- // Add the tool call to the isolated message history
80
- toolMessages.push({
81
- role: "assistant",
82
- content: "",
83
- tool_calls: [{
84
- id: toolCall.id,
85
- type: "function",
86
- function: {
87
- name: toolCall.function.name,
88
- arguments: JSON.stringify(toolArgs)
89
- }
90
- }]
91
- });
92
-
93
- // Add the tool result to the isolated message history
94
- const toolResultContent = typeof toolResult === 'string' ? toolResult : JSON.stringify(toolResult?.result || toolResult);
95
-
96
- toolMessages.push({
97
- role: "tool",
98
- tool_call_id: toolCall.id,
99
- name: toolCall.function.name,
100
- content: toolResultContent
101
- });
102
-
103
- // Add the screenshots using OpenAI image format
104
- if (toolResult?.toolImages && toolResult.toolImages.length > 0) {
105
101
  toolMessages.push({
106
- role: "user",
107
- content: [
108
- {
109
- type: "text",
110
- text: "The tool with id " + toolCall.id + " has also supplied you with these images."
111
- },
112
- ...toolResult.toolImages.map(toolImage => ({
113
- type: "image_url",
114
- image_url: {
115
- url: `data:image/png;base64,${toolImage}`
116
- }
117
- }))
118
- ]
102
+ role: "tool",
103
+ tool_call_id: toolCall.id,
104
+ name: toolCall.function.name,
105
+ content: toolResultContent
119
106
  });
120
- }
121
107
 
122
- return {
123
- success: true,
124
- result: toolResult,
125
- toolCall,
126
- toolArgs,
127
- toolFunction,
128
- messages: toolMessages
129
- };
130
- } catch (error) {
131
- logger.error(`Error executing tool ${toolCall?.function?.name || 'unknown'}: ${error.message}`);
132
-
133
- // Create error message history
134
- const errorMessages = JSON.parse(JSON.stringify(initialMessages));
135
- errorMessages.push({
136
- role: "assistant",
137
- content: "",
138
- tool_calls: [{
139
- id: toolCall.id,
140
- type: "function",
141
- function: {
142
- name: toolCall.function.name,
143
- arguments: JSON.stringify(toolCall.function.arguments)
144
- }
145
- }]
146
- });
147
- errorMessages.push({
148
- role: "tool",
149
- tool_call_id: toolCall.id,
150
- name: toolCall.function.name,
151
- content: `Error: ${error.message}`
152
- });
153
-
154
- return {
155
- success: false,
156
- error: error.message,
157
- toolCall,
158
- toolArgs: toolCall?.function?.arguments ? JSON.parse(toolCall.function.arguments) : {},
159
- toolFunction: toolCall?.function?.name?.toLowerCase() || 'unknown',
160
- messages: errorMessages
161
- };
162
- }
163
- }));
164
-
165
- // Merge all message histories in order
166
- let finalMessages = JSON.parse(JSON.stringify(initialMessages));
167
- for (const result of toolResults) {
168
- try {
169
- if (!result?.messages) {
170
- logger.error('Invalid tool result structure, skipping message history update');
171
- continue;
108
+ // Add the screenshots using OpenAI image format
109
+ if (toolResult?.toolImages && toolResult.toolImages.length > 0) {
110
+ toolMessages.push({
111
+ role: "user",
112
+ content: [
113
+ {
114
+ type: "text",
115
+ text: "The tool with id " + toolCall.id + " has also supplied you with these images."
116
+ },
117
+ ...toolResult.toolImages.map(toolImage => ({
118
+ type: "image_url",
119
+ image_url: {
120
+ url: `data:image/png;base64,${toolImage}`
121
+ }
122
+ }))
123
+ ]
124
+ });
125
+ }
126
+
127
+ return {
128
+ success: true,
129
+ result: toolResult,
130
+ toolCall,
131
+ toolArgs,
132
+ toolFunction,
133
+ messages: toolMessages
134
+ };
135
+ } catch (error) {
136
+ logger.error(`Error executing tool ${toolCall?.function?.name || 'unknown'}: ${error.message}`);
137
+
138
+ // Create error message history
139
+ const errorMessages = JSON.parse(JSON.stringify(preToolCallMessages));
140
+ errorMessages.push({
141
+ role: "assistant",
142
+ content: "",
143
+ tool_calls: [{
144
+ id: toolCall.id,
145
+ type: "function",
146
+ function: {
147
+ name: toolCall.function.name,
148
+ arguments: JSON.stringify(toolCall.function.arguments)
149
+ }
150
+ }]
151
+ });
152
+ errorMessages.push({
153
+ role: "tool",
154
+ tool_call_id: toolCall.id,
155
+ name: toolCall.function.name,
156
+ content: `Error: ${error.message}`
157
+ });
158
+
159
+ return {
160
+ success: false,
161
+ error: error.message,
162
+ toolCall,
163
+ toolArgs: toolCall?.function?.arguments ? JSON.parse(toolCall.function.arguments) : {},
164
+ toolFunction: toolCall?.function?.name?.toLowerCase() || 'unknown',
165
+ messages: errorMessages
166
+ };
172
167
  }
168
+ }));
169
+
170
+ // Merge all message histories in order
171
+ for (const result of toolResults) {
172
+ try {
173
+ if (!result?.messages) {
174
+ logger.error('Invalid tool result structure, skipping message history update');
175
+ continue;
176
+ }
177
+
178
+ // Add only the new messages from this tool's history
179
+ const newMessages = result.messages.slice(preToolCallMessages.length);
180
+ finalMessages.push(...newMessages);
181
+ } catch (error) {
182
+ logger.error(`Error merging message history for tool result: ${error.message}`);
183
+ }
184
+ }
173
185
 
174
- // Add only the new messages from this tool's history
175
- const newMessages = result.messages.slice(initialMessages.length);
176
- finalMessages.push(...newMessages);
177
- } catch (error) {
178
- logger.error(`Error merging message history for tool result: ${error.message}`);
186
+ // Check if any tool calls failed
187
+ const failedTools = toolResults.filter(result => !result.success);
188
+ if (failedTools.length > 0) {
189
+ logger.warn(`Some tool calls failed: ${failedTools.map(t => t.error).join(', ')}`);
179
190
  }
180
- }
181
191
 
182
- // Check if any tool calls failed
183
- const failedTools = toolResults.filter(result => !result.success);
184
- if (failedTools.length > 0) {
185
- logger.warn(`Some tool calls failed: ${failedTools.map(t => t.error).join(', ')}`);
192
+ pathwayResolver.toolCallCount = (pathwayResolver.toolCallCount || 0) + toolResults.length;
193
+
194
+ } else {
195
+ finalMessages.push({
196
+ role: "user",
197
+ content: [
198
+ {
199
+ type: "text",
200
+ text: "This agent has reached the maximum number of tool calls - no more tool calls will be executed."
201
+ }
202
+ ]
203
+ });
204
+
186
205
  }
187
206
 
188
207
  args.chatHistory = finalMessages;
189
208
 
209
+ // clear any accumulated pathwayResolver errors from the tools
210
+ pathwayResolver.errors = [];
211
+
190
212
  return await pathwayResolver.promptAndParse({
191
213
  ...args,
192
214
  tools: entityToolsOpenAiFormat,
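The restructured block above keeps the existing fan-out/merge pattern: every tool call in a batch runs against its own copy of the pre-tool-call history, the calls execute in parallel, and afterwards only the messages each tool appended beyond the shared prefix are merged back in order. A stripped-down sketch of that pattern (runOneTool is a placeholder for the callTool plus assistant/tool message bookkeeping shown above):

```js
// Stripped-down sketch of the isolated-history fan-out/merge used above.
// runOneTool is a placeholder that appends the assistant tool_call message
// and the matching tool result message to the history it is given.
async function executeToolCalls(toolCalls, chatHistory, runOneTool) {
  const preToolCallMessages = structuredClone(chatHistory);
  const finalMessages = structuredClone(preToolCallMessages);

  // Each tool works against its own copy so parallel calls cannot interleave.
  const results = await Promise.all(
    toolCalls.map(async (toolCall) => {
      const toolMessages = structuredClone(preToolCallMessages);
      await runOneTool(toolCall, toolMessages);
      return { messages: toolMessages };
    })
  );

  // Merge: append only the messages each tool added beyond the shared prefix.
  for (const { messages } of results) {
    finalMessages.push(...messages.slice(preToolCallMessages.length));
  }
  return finalMessages;
}
```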
@@ -204,10 +226,6 @@ export default {
204
226
  const entityConfig = loadEntityConfig(entityId);
205
227
  const { entityTools, entityToolsOpenAiFormat } = getToolsForEntity(entityConfig);
206
228
  const { useMemory: entityUseMemory = true, name: entityName, instructions: entityInstructions } = entityConfig || {};
207
-
208
- if (entityId && entityName) {
209
- args.aiName = entityName;
210
- }
211
229
 
212
230
  args = {
213
231
  ...args,
@@ -228,12 +246,12 @@ export default {
228
246
  const promptPrefix = researchMode ? 'Formatting re-enabled\n' : '';
229
247
 
230
248
  const memoryTemplates = entityUseMemory ?
231
- `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n\n` : '';
249
+ `{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n\n{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_MEMORY_CONTEXT}}\n\n` : '';
232
250
 
233
- const instructionTemplates = entityInstructions ? (entityInstructions + '\n\n') : `{{renderTemplate AI_EXPERTISE}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n`;
251
+ const instructionTemplates = entityInstructions ? (entityInstructions + '\n\n') : `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\n`;
234
252
 
235
253
  const promptMessages = [
236
- {"role": "system", "content": `${promptPrefix}${memoryTemplates}${instructionTemplates}{{renderTemplate AI_TOOLS}}\n\n{{renderTemplate AI_GROUNDING_INSTRUCTIONS}}\n\n{{renderTemplate AI_DATETIME}}`},
254
+ {"role": "system", "content": `${promptPrefix}${instructionTemplates}{{renderTemplate AI_TOOLS}}\n\n{{renderTemplate AI_GROUNDING_INSTRUCTIONS}}\n\n${memoryTemplates}{{renderTemplate AI_DATETIME}}`},
237
255
  "{{chatHistory}}",
238
256
  ];
239
257
 
@@ -263,15 +281,7 @@ export default {
263
281
 
264
282
  // truncate the chat history in case there is really long content
265
283
  const truncatedChatHistory = resolver.modelExecutor.plugin.truncateMessagesToTargetLength(args.chatHistory, null, 1000);
266
-
267
- // Add the memory context to the chat history if applicable
268
- if (truncatedChatHistory.length > 1 && entityUseMemory) {
269
- const memoryContext = await callPathway('sys_read_memory', { ...args, chatHistory: truncatedChatHistory, section: 'memoryContext', priority: 0, recentHours: 0, stream: false }, resolver);
270
- if (memoryContext) {
271
- insertToolCallAndResults(args.chatHistory, "Load general memory context information", "LoadMemoryContext", memoryContext);
272
- }
273
- }
274
-
284
+
275
285
  // Asynchronously manage memory for this context
276
286
  if (args.aiMemorySelfModify && entityUseMemory) {
277
287
  callPathway('sys_memory_manager', { ...args, chatHistory: truncatedChatHistory, stream: false })
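Two related changes sit in the hunks above: the system prompt now renders the memory templates (AI_MEMORY_INSTRUCTIONS, AI_MEMORY, and the new AI_MEMORY_CONTEXT) after the tool and grounding instructions, and the code path that called sys_read_memory and spliced the result in as a synthetic LoadMemoryContext tool call has been removed, so contextual memory now appears to arrive through the AI_MEMORY_CONTEXT template instead (see the memoryContext addition in PathwayResolver further down). A rough illustration of the new ordering, written as plain string assembly (the real code goes through the renderTemplate Handlebars helper):

```js
// Rough illustration of the new system-prompt ordering shown above.
// renderTemplate is a stand-in for Cortex's Handlebars helper of the same name.
function buildSystemPrompt(renderTemplate, { promptPrefix = "", entityInstructions = "", useMemory = true }) {
  const instructions = entityInstructions
    ? entityInstructions + "\n\n"
    : renderTemplate("AI_COMMON_INSTRUCTIONS") + "\n\n" + renderTemplate("AI_EXPERTISE") + "\n\n";

  const memory = useMemory
    ? renderTemplate("AI_MEMORY_INSTRUCTIONS") + "\n\n" +
      renderTemplate("AI_MEMORY") + "\n\n" +
      renderTemplate("AI_MEMORY_CONTEXT") + "\n\n"
    : "";

  // New order: instructions -> tools -> grounding -> memory -> datetime.
  return promptPrefix + instructions +
    renderTemplate("AI_TOOLS") + "\n\n" +
    renderTemplate("AI_GROUNDING_INSTRUCTIONS") + "\n\n" +
    memory +
    renderTemplate("AI_DATETIME");
}
```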
@@ -48,13 +48,17 @@ export default {
48
48
  icon: "🤖",
49
49
  function: {
50
50
  name: "CodeExecution",
51
- description: "Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks - examples include data analysis, file manipulation, or other tasks that require code execution.",
51
+ description: "Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks - examples include data analysis, file manipulation, or other tasks that require code execution. This will start a background task and return - you will not receive the response immediately.",
52
52
  parameters: {
53
53
  type: "object",
54
54
  properties: {
55
55
  codingTask: {
56
56
  type: "string",
57
- description: "Detailed task description for the coding agent. Include all necessary information as this is the only message the coding agent receives. Let the agent decide how to solve it without making assumptions about its capabilities. IMPORTANT: The coding agent does not share your context, so you must provide it with all the information in this message. If you are asking it to operate on files or other data from your context, you must provide the fully-qualified URL to each of the files you want it to use. Also make sure you explicitly instruct the agent to use those files."
57
+ description: "Detailed task description for the coding agent. Include all necessary information as this is the only message the coding agent receives. Let the agent decide how to solve it without making assumptions about its capabilities. IMPORTANT: The coding agent does not share your context, so you must provide it with all the information in this message."
58
+ },
59
+ inputFiles: {
60
+ type: "string",
61
+ description: "A list of input files that the coding agent must use to complete the task. Each file should be the fully-qualified URL to the file. Omit this parameter if no input files are needed."
58
62
  },
59
63
  userMessage: {
60
64
  type: "string",
@@ -72,12 +76,18 @@ export default {
72
76
 
73
77
  executePathway: async ({args, resolver}) => {
74
78
  try {
75
- const { codingTask, userMessage, codingTaskKeywords } = args;
79
+ const { codingTask, userMessage, inputFiles, codingTaskKeywords } = args;
76
80
  const { contextId } = args;
77
81
 
82
+ let taskSuffix = "";
83
+ if (inputFiles) {
84
+ taskSuffix = `You must use the following files as input to complete the task: ${inputFiles}.`
85
+ }
86
+
87
+
78
88
  // Send the task to the queue
79
89
  const codeRequestId = await sendMessageToQueue({
80
- message: codingTask,
90
+ message: `${codingTask}\n\n${taskSuffix}`,
81
91
  contextId,
82
92
  keywords: codingTaskKeywords
83
93
  });
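With the new inputFiles parameter, file URLs are passed separately and appended to the queued task text rather than being packed into codingTask. A hypothetical set of arguments an agent might emit for this tool after the change (the URL and task text are made up for illustration):

```js
// Hypothetical CodeExecution tool-call arguments after this change
// (the file URL and task text are made up for illustration).
const exampleArguments = {
  codingTask: "Load the CSV, compute monthly revenue totals, and return a summary table.",
  inputFiles: "https://example.com/files/revenue-2024.csv",
  userMessage: "Running a data-analysis task on the uploaded CSV...",
};

// The pathway then queues:
// `${codingTask}\n\nYou must use the following files as input to complete the task: ${inputFiles}.`
```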
@@ -28,28 +28,6 @@ export default {
28
28
  required: ["detailedInstructions", "userMessage"]
29
29
  }
30
30
  }
31
- },
32
- {
33
- type: "function",
34
- icon: "🧩",
35
- function: {
36
- name: "LoadMemoryContext",
37
- description: "This tool quickly preloads the memory context for this turn of the conversation. It's typically automatically used by the system, but you can use it if you need to.",
38
- parameters: {
39
- type: "object",
40
- properties: {
41
- detailedInstructions: {
42
- type: "string",
43
- description: "Detailed instructions about what you need the tool to do"
44
- },
45
- userMessage: {
46
- type: "string",
47
- description: "A user-friendly message that describes what you're doing with this tool"
48
- }
49
- },
50
- required: ["detailedInstructions", "userMessage"]
51
- }
52
- }
53
31
  }],
54
32
 
55
33
  executePathway: async ({args, runAllPrompts, resolver}) => {
@@ -131,7 +131,7 @@ class ModelExecutor {
131
131
  } catch (error) {
132
132
  logger.error(`Error executing model plugin for pathway ${pathwayResolver?.pathway?.name}: ${error.message}`);
133
133
  logger.debug(error.stack);
134
- pathwayResolver.errors.push(error.message);
134
+ pathwayResolver.logError(error.message);
135
135
  return null;
136
136
  }
137
137
  }
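ModelExecutor now reports failures through pathwayResolver.logError(...) instead of pushing onto pathwayResolver.errors directly. The method body is not part of this diff, so the following is only an assumed sketch of what such a helper likely does:

```js
// Assumed sketch of PathwayResolver.logError - only the call site appears in
// this diff, so this body is a guess at the intended behavior.
class PathwayResolverSketch {
  constructor(logger) {
    this.errors = [];
    this.logger = logger;
  }

  logError(message) {
    this.errors.push(message);   // accumulate for the errors.join(', ') progress reporting
    this.logger.error(message);  // and surface it in the logs immediately
  }
}
```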
@@ -77,7 +77,7 @@ class PathwayResolver {
77
77
  if (requestProgress.progress === 1 && this.rootRequestId) {
78
78
  delete requestProgress.progress;
79
79
  }
80
- publishRequestProgress({...requestProgress, info: this.tool || ''});
80
+ publishRequestProgress({...requestProgress, info: this.tool || '', error: this.errors.join(', ')});
81
81
  }
82
82
 
83
83
  // This code handles async and streaming responses for either long-running
@@ -89,12 +89,24 @@ class PathwayResolver {
89
89
  responseData = await this.executePathway(args);
90
90
  }
91
91
  catch (error) {
92
+ this.errors.push(error.message || error.toString());
92
93
  publishRequestProgress({
93
94
  requestId: this.rootRequestId || this.requestId,
94
95
  progress: 1,
95
96
  data: '',
96
97
  info: '',
97
- error: error.message || error.toString()
98
+ error: this.errors.join(', ')
99
+ });
100
+ return;
101
+ }
102
+
103
+ if (!responseData) {
104
+ publishRequestProgress({
105
+ requestId: this.rootRequestId || this.requestId,
106
+ progress: 1,
107
+ data: '',
108
+ info: '',
109
+ error: this.errors.join(', ')
98
110
  });
99
111
  return;
100
112
  }
@@ -113,7 +125,8 @@ class PathwayResolver {
113
125
  progress: Math.min(completedCount, totalCount) / totalCount,
114
126
  // Clients expect these to be strings
115
127
  data: JSON.stringify(responseData || ''),
116
- info: this.tool || ''
128
+ info: this.tool || '',
129
+ error: this.errors.join(', ') || ''
117
130
  });
118
131
  }
119
132
  }
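Together with the new empty-response guard above (the if (!responseData) branch), every publishRequestProgress call shown in this diff now carries an error field built from this.errors.join(', '), so subscribers can see accumulated tool and model errors even when a turn otherwise completes. An illustrative progress payload after this change (field names come from the calls above; the values are made up):

```js
// Illustrative shape of a progress message after this change
// (field names come from the calls above; values are made up).
const progressMessage = {
  requestId: "req-123",
  progress: 0.5,                            // deleted above when progress === 1 and a rootRequestId is set
  data: JSON.stringify("partial output"),   // clients expect data as a string
  info: "SearchMemory",                     // the currently reported tool, if any
  error: "",                                // joined contents of resolver.errors
};
```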
@@ -501,7 +514,8 @@ class PathwayResolver {
501
514
  memorySelf: this.memorySelf,
502
515
  memoryDirectives: this.memoryDirectives,
503
516
  memoryTopics: this.memoryTopics,
504
- memoryUser: this.memoryUser
517
+ memoryUser: this.memoryUser,
518
+ memoryContext: this.memoryContext
505
519
  }, prompt, this);
506
520
  } else {
507
521
  result = text;
@@ -84,7 +84,14 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
84
84
  const { length, units } = this.getLength(content);
85
85
  const displayContent = this.shortenContent(content);
86
86
 
87
- logger.verbose(`message ${index + 1}: role: ${message.role}, ${units}: ${length}, content: "${displayContent}"`);
87
+ let logMessage = `message ${index + 1}: role: ${message.role}, ${units}: ${length}, content: "${displayContent}"`;
88
+
89
+ // Add tool calls to log if they exist
90
+ if (message.role === 'assistant' && message.tool_calls) {
91
+ logMessage += `, tool_calls: ${JSON.stringify(message.tool_calls)}`;
92
+ }
93
+
94
+ logger.verbose(logMessage);
88
95
  totalLength += length;
89
96
  totalUnits = units;
90
97
  });
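This last change appends the serialized tool_calls to the per-message verbose log line for assistant messages, which makes agent loops easier to trace. Roughly what such a line looks like, with made-up values:

```js
// Roughly the verbose line produced for an assistant message that carries
// tool calls (all values here are made up for illustration).
const exampleVerboseLine =
  'message 3: role: assistant, tokens: 12, content: "", ' +
  'tool_calls: [{"id":"call_abc","type":"function","function":{"name":"SearchMemory","arguments":"{}"}}]';
```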