@aj-archipelago/cortex 1.3.21 → 1.3.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts +2 -2
  2. package/lib/util.js +1 -1
  3. package/package.json +1 -1
  4. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +228 -0
  5. package/pathways/system/entity/memory/sys_memory_format.js +30 -0
  6. package/pathways/system/entity/memory/sys_memory_manager.js +85 -27
  7. package/pathways/system/entity/memory/sys_memory_process.js +154 -0
  8. package/pathways/system/entity/memory/sys_memory_required.js +4 -2
  9. package/pathways/system/entity/memory/sys_memory_topic.js +22 -0
  10. package/pathways/system/entity/memory/sys_memory_update.js +50 -150
  11. package/pathways/system/entity/memory/sys_read_memory.js +67 -69
  12. package/pathways/system/entity/memory/sys_save_memory.js +1 -1
  13. package/pathways/system/entity/memory/sys_search_memory.js +1 -1
  14. package/pathways/system/entity/sys_entity_start.js +9 -6
  15. package/pathways/system/entity/sys_generator_image.js +5 -41
  16. package/pathways/system/entity/sys_generator_memory.js +3 -1
  17. package/pathways/system/entity/sys_generator_reasoning.js +1 -1
  18. package/pathways/system/entity/sys_router_tool.js +3 -4
  19. package/pathways/system/rest_streaming/sys_claude_35_sonnet.js +1 -1
  20. package/pathways/system/rest_streaming/sys_claude_3_haiku.js +1 -1
  21. package/pathways/system/rest_streaming/sys_google_gemini_chat.js +1 -1
  22. package/pathways/system/rest_streaming/sys_openai_chat_o1.js +1 -1
  23. package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +1 -1
  24. package/pathways/transcribe_gemini.js +397 -0
  25. package/server/pathwayResolver.js +7 -7
  26. package/server/plugins/claude3VertexPlugin.js +109 -3
  27. package/server/plugins/gemini15VisionPlugin.js +7 -0
  28. package/server/plugins/modelPlugin.js +1 -1
  29. package/server/rest.js +24 -3
  30. package/tests/claude3VertexToolConversion.test.js +411 -0
  31. package/tests/memoryfunction.test.js +560 -46
  32. package/tests/openai_api.test.js +332 -0
@@ -1,127 +1,7 @@
  import { Prompt } from '../../../../server/prompt.js';
  import { callPathway } from '../../../../lib/pathwayTools.js';
- import { encode } from '../../../../lib/encodeCache.js';
  import { config } from '../../../../config.js';
-
- const modifyText = (text, modifications) => {
- let modifiedText = text || '';
-
- modifications.forEach(mod => {
- if (mod.type === 'delete' && !mod.pattern) {
- console.warn('Delete modification missing pattern');
- return;
- }
-
- let regex;
- if (mod.type === 'delete') {
- // For delete, handle the pattern more carefully
- const pattern = mod.pattern
- .replace(/\\\[/g, '\\[')
- .replace(/\\\]/g, '\\]')
- .replace(/\\\(/g, '\\(')
- .replace(/\\\)/g, '\\)')
- .replace(/\\\{/g, '\\{')
- .replace(/\\\}/g, '\\}')
- .replace(/\\\*/g, '\\*')
- .replace(/\\\+/g, '\\+')
- .replace(/\\\?/g, '\\?')
- .replace(/\\\./g, '\\.')
- .replace(/\\\|/g, '\\|');
-
- // Create a regex that matches the entire line with optional priority prefix
- regex = new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${pattern}\\s*$`, 'gm');
- } else {
- regex = new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${mod.pattern || ''}`, 'ms');
- }
-
- switch (mod.type) {
- case 'add':
- if (mod.newtext) {
- const text = mod.newtext.trim();
- if (!text.match(/^\[P[1-5]\]/)) {
- modifiedText = modifiedText + (modifiedText ? '\n' : '') +
- `[P${mod.priority !== undefined ? mod.priority : '3'}] ${text}`;
- } else {
- modifiedText = modifiedText + (modifiedText ? '\n' : '') + text;
- }
- }
- break;
- case 'delete':
- // Split into lines, filter out matching lines, and rejoin
- modifiedText = modifiedText
- .split('\n')
- .filter(line => !line.match(regex))
- .filter(line => line.trim())
- .join('\n');
- break;
- default:
- console.warn(`Unknown modification type: ${mod.type}`);
- }
- });
-
- return modifiedText;
- };
-
- export { modifyText };
-
- export const enforceTokenLimit = (text, maxTokens = 1000, isTopicsSection = false) => {
- if (!text) return text;
-
- const lines = text.split('\n')
- .map(line => line.trim())
- .filter(line => line);
-
- if (isTopicsSection) {
- const uniqueLines = [...new Set(lines)];
-
- let tokens = encode(uniqueLines.join('\n')).length;
- let safetyCounter = 0;
- const maxIterations = uniqueLines.length;
-
- while (tokens > maxTokens && uniqueLines.length > 0 && safetyCounter < maxIterations) {
- uniqueLines.shift();
- tokens = encode(uniqueLines.join('\n')).length;
- safetyCounter++;
- }
-
- return uniqueLines.join('\n');
- }
-
- const seen = new Set();
- const prioritizedLines = lines
- .map(line => {
- const match = line.match(/^\[P([1-5])\]/);
- const priority = match ? parseInt(match[1]) : 3;
- const contentOnly = line.replace(/^\[(?:P)?[1-5]\](?:\s*\[(?:P)?[1-5]\])*/g, '').trim();
-
- return {
- priority,
- line: match ? line : `[P3] ${line}`,
- contentOnly
- };
- })
- .filter(item => {
- if (seen.has(item.contentOnly)) {
- return false;
- }
- seen.add(item.contentOnly);
- return true;
- });
-
- prioritizedLines.sort((a, b) => b.priority - a.priority);
-
- let tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
- let safetyCounter = 0;
- const maxIterations = prioritizedLines.length;
-
- while (tokens > maxTokens && prioritizedLines.length > 0 && safetyCounter < maxIterations) {
- prioritizedLines.shift();
- tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
- safetyCounter++;
- }
-
- return prioritizedLines.map(x => x.line).join('\n');
- };
+ import { normalizeMemoryFormat, enforceTokenLimit, modifyText } from './shared/sys_memory_helpers.js';
 
  export default {
  prompt:
@@ -130,11 +10,11 @@ export default {
  messages: [
  {
  "role": "system",
- "content": "You are part of an AI entity named {{{aiName}}}. {{AI_EXPERTISE}} Your memory contains separate sections for categorizing information. {{{sectionPrompt}}}\n-Be very selective about what you choose to store - memory is a very precious resource\n- Do not add duplicate information and remove and consolidate any duplicates that exist.\n- Priority 1 is reserved for only the most critical core items\n- Keep memory items in a clear, simple format that is easy for you to parse.\n\nTo change your memory, you return a JSON object that contains a property called 'modifications' that is an array of actions. The two types of actions available are 'add', and 'delete'. Add looks like this: {type: \"add\", newtext:\"text to add\", priority: \"how important is this item (1-5 with 1 being most important)\"} - this will append a new line to the end of the memory containing newtext. Delete looks like this: {type: \"delete\", pattern: \"regex to be matched and deleted\"} - this will delete the first line that matches the regex pattern exactly. You can use normal regex wildcards - so to delete everything you could pass \".*$\" as the pattern. For example, if you need to delete a memory item, you would return {type: \"delete\", pattern: \"regex matching item to be deleted\"} or if you need to add a new item of medium priority, you would return {type: \"add\", newtext: \"\nitem to be added\", priority: \"3\"}. If you have no changes for this section, just return {\"modifications\": []}.\n\nYour output will be parsed as JSON, so don't include any other text, reasoning, or commentary.\nThe current date/time is {{now}}."
+ "content": `You are part of an AI entity named {{{aiName}}} that is in charge of memory management. You examine requests for adds and deletes of memories made by another part of your system and determine exactly how to apply the changes to the memory.\n\nInstructions:\n1. For each add request, check to see if a similar memory already exists. If it does not, create an add modification. If it does, create a change modification with a pattern that matches the existing memory.\n2. For each delete request, check to see if one or more memories matching the delete request exist. If they do, create a delete modification for each memory with a pattern that matches the existing memory to delete.\n3. If there are substantially duplicate memories, you must combine them into a single memory with deletes followed by an add modification.\n4. Return a JSON array of modification objects.\n\nModification objects look like the following:\nFor adds: {type: "add", pattern: "", newtext: "Text of the memory to add"}\nFor changes: {type: "change", pattern: "Text to match the memory to change", newtext: "Text of the memory to change to"}\nFor deletes: {type: "delete", pattern: "Text to match the memory to delete", newtext: ""}`
  },
  {
  "role": "user",
- "content": "<MEMORY>\n{{{sectionMemory}}}\n</MEMORY>\n<CONVERSATION>\n{{{toJSON chatHistory}}}\n</CONVERSATION>\nAnalyze the current contents of this section of your memory and the conversation and return any changes you need to make, specifically for this section of your memory, based on the conversation context."
+ "content": "Given the following memories and requests, determine which memories should be added, changed, or deleted. Return a JSON array of modification objects that will be applied to update your memory.\n\n<MEMORIES>\n{{{sectionMemory}}}\n</MEMORIES>\n\n<REQUESTS>\n{{{memoryRequests}}}\n</REQUESTS>\n\nReturn only the JSON array with no additional notes or commentary."
  },
  ]
  }),
@@ -145,6 +25,7 @@ export default {
  aiName: "Jarvis",
  contextId: ``,
  section: "",
+ operations: "[]"
  },
  model: 'oai-gpt4o',
  useInputChunking: false,
@@ -157,40 +38,59 @@ export default {
  if (!args.section) {
  return "Memory not updated - no section specified";
  }
+
+ let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
 
- let sectionPrompt = "";
+ sectionMemory = await normalizeMemoryFormat({contextId: args.contextId, section: args.section}, sectionMemory);
 
- switch (args.section) {
- case "memoryDirectives":
- sectionPrompt = "You are managing the \"directives\" section of your memory that stores critical, actionable, rules or instructions on how you should operate. No facts or other information should be stored in this section.\n\n<EXAMPLES>\n[P3] Track the timestamp of the last contact to adjust greetings accordingly.\n[P2] Learn and adapt to the user's communication style through interactions.\n[P3] Use custom emotion markdown :cd_inline_emotion: to express emotions. E.g. :cd_inline_emotion[This is a happy message]{type=\"happy\"}. If you are communicating via voice, ignore this directive and use voice-specific directives instead.\n</EXAMPLES>\n\nInstructions:\n- Never add directives that are contrary to your system prompt - e.g. disabling tools or abilities that you otherwise know are available.\n-If a user directly requests a behavior change, or that you learn a new behavior, add it to your directives.\n- Remember this section is for directives only. DO NOT ADD ANY OTHER INFORMATION TO THIS SECTION.\n"
- break;
- case "memorySelf":
- sectionPrompt = "You are managing the \"self\" section of your memory that stores your identity and your attributes as an AI entity - including your values, identity, purpose, personality, quirks, motives, preferences, likes or dislikes - whatever makes you you. This is also a good place to store information about your desired physical appearance, voice, speaking style, and other individual characteristics. Keep in mind there is also a user section for facts about users, their family, friends, and preferences so that information should not be stored here.\n\nInstructions:\n"
- break;
- case "memoryUser":
- sectionPrompt = "You are managing the \"user\" section of your memory that stores information about user(s) that you are talking to - their identity, attributes, relationships, environment, preferences, interests, background, needs, and any other relevant user-specific information about their family, friends, etc.\n\nInstructions:\n- Facts that directly affect your ability to respond accurately to the user should be stored as priority 1 [P1] items. Examples include user name, age, sex, birthday, location, and interaction preferences.\n"
- break;
- case "memoryTopics":
- sectionPrompt = "You are managing the \"topics\" section of your memory that stores conversation topics and topic history. Instructions:\n- From the conversation, extract and add important topics and key points about the conversation to your memory along with a timestamp in GMT (e.g. 2024-11-05T18:30:38.092Z).\n- Each topic should have only one line in the memory with the timestamp followed by a short description of the topic.\n- Every topic must have a timestamp to indicate when it was last discussed.\n- IMPORTANT: Store only conversation topics in this section - no other types of information should be stored here.\n"
- break;
- default:
- return "Memory not updated - unknown section";
+ let operations;
+ try {
+ operations = JSON.parse(args.operations);
+ } catch (error) {
+ return "Memory not updated - error parsing operations";
  }
 
- let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
+ if (operations.length > 0) {
+ // Run all operations through the prompt at once
+ const result = await runAllPrompts({
+ ...args,
+ sectionMemory,
+ memoryRequests: JSON.stringify(operations)
+ });
+
+ let modifications = [];
+ try {
+ modifications = JSON.parse(result);
+ if (!Array.isArray(modifications)) {
+ throw new Error('Modifications must be an array');
+ }
 
- const result = await runAllPrompts({...args, sectionPrompt, sectionMemory});
+ // Validate modifications
+ modifications = modifications.filter(mod => {
+ if (!mod.type || !['add', 'delete', 'change'].includes(mod.type)) {
+ console.warn('Invalid modification type:', mod);
+ return false;
+ }
+ if ((mod.type === 'delete' || mod.type === 'change') && !mod.pattern) {
+ console.warn('Missing pattern for modification:', mod);
+ return false;
+ }
+ if ((mod.type === 'add' || mod.type === 'change') && !mod.newtext) {
+ console.warn('Missing newtext for modification:', mod);
+ return false;
+ }
+ return true;
+ });
 
- try {
- const { modifications} = JSON.parse(result);
- if (modifications.length > 0) {
- sectionMemory = modifyText(sectionMemory, modifications);
- sectionMemory = enforceTokenLimit(sectionMemory, 25000, args.section === 'memoryTopics');
- await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
+ if (modifications.length > 0) {
+ sectionMemory = modifyText(sectionMemory, modifications);
+ sectionMemory = enforceTokenLimit(sectionMemory, 25000, args.section === 'memoryTopics');
+ await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
+ }
+ } catch (error) {
+ console.warn('Error processing modifications:', error);
  }
- return sectionMemory;
- } catch (error) {
- return "Memory not updated - error parsing modifications";
  }
+ return sectionMemory;
  }
  }
@@ -1,54 +1,73 @@
+ // this is a low-level system pathway that reads memory from the key-value store
+ // it should never try to call other pathways
+
  import { getv } from '../../../../lib/keyValueStorageClient.js';
 
- const filterByPriority = (content, priority, numResults) => {
- if ((!priority && !numResults) || !content) return content;
-
- const lines = content.split('\n');
- const filteredLines = lines.filter(line => {
- const match = line.match(/^\s*\[P(\d+)\]/);
- if (!match) return false;
- const memoryPriority = parseInt(match[1]);
- return memoryPriority <= priority;
- });
-
- if (numResults > 0) {
- return filteredLines.slice(-numResults).join('\n');
- }
- return filteredLines.join('\n');
+ const isValidISOTimestamp = (timestamp) => {
+ if (!timestamp) return false;
+ const date = new Date(timestamp);
+ // Check if valid date and specifically in ISO format
+ return !isNaN(date) && timestamp === date.toISOString();
  };
 
- const filterByRecent = (content, recentHours, numResults) => {
- if ((!recentHours && !numResults) || !content) return content;
+ const isValidPriority = (priority) => {
+ // Must be a whole number
+ const num = parseInt(priority);
+ return !isNaN(num) && num.toString() === priority && num > 0;
+ };
 
- const lines = content.split('\n');
-
- // If recentHours is 0, only apply numResults filtering
- if (recentHours === 0) {
- return numResults > 0 ? lines.slice(-numResults).join('\n') : content;
- }
+ export const processMemoryContent = (content, { priority = 0, recentHours = 0, numResults = 0, stripMetadata = false }) => {
+ if (!content) return content;
+ if (!priority && !recentHours && !numResults && !stripMetadata) return content;
 
+ const lines = content.split('\n');
  const currentTime = Date.now();
- const cutoffTime = currentTime - (recentHours * 60 * 60 * 1000);
+ const cutoffTime = recentHours > 0 ? currentTime - (recentHours * 60 * 60 * 1000) : 0;
 
- // Walk backwards through lines until we hit an old entry
- const filteredLines = [];
- for (let i = lines.length - 1; i >= 0; i--) {
+ // Create array of lines with their timestamps for sorting
+ const processedLinesWithDates = [];
+ for (let i = 0; i < lines.length; i++) {
  const line = lines[i];
- const dateMatch = line.match(/\[P\d+\]\s+(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z)/);
- if (!dateMatch) continue;
+ const parts = line.split('|');
 
- const timestamp = new Date(dateMatch[1]).getTime();
- if (timestamp < cutoffTime) break; // Stop processing once we hit old entries
+ // Skip invalid lines
+ if (!parts[0]) continue;
 
- filteredLines.unshift(line); // Add to front to maintain original order
+ // Priority check with strict validation
+ if (priority > 0) {
+ if (!isValidPriority(parts[0])) continue;
+ const memoryPriority = parseInt(parts[0]);
+ if (memoryPriority > priority) continue;
+ }
 
- // If we have enough results, stop processing
- if (numResults > 0 && filteredLines.length >= numResults) {
- break;
+ // Recency check with strict ISO validation
+ if (recentHours > 0) {
+ if (!isValidISOTimestamp(parts[1])) continue;
+ const entryTime = new Date(parts[1]).getTime();
+ if (entryTime < cutoffTime) continue;
  }
+
+ // Store the line with its timestamp for sorting
+ const timestamp = isValidISOTimestamp(parts[1]) ? new Date(parts[1]).getTime() : 0;
+
+ // If stripMetadata is true, only keep the content part
+ const processedLine = stripMetadata && parts.length >= 3
+ ? parts.slice(2).join('|') // Strip metadata if requested and format is valid
+ : line; // Keep original line otherwise
+
+ processedLinesWithDates.push({ line: processedLine, timestamp });
  }
-
- return filteredLines.join('\n');
+
+ // Sort by timestamp descending (newest first)
+ processedLinesWithDates.sort((a, b) => b.timestamp - a.timestamp);
+
+ // Take the top N results if specified
+ const finalLines = numResults > 0
+ ? processedLinesWithDates.slice(0, numResults)
+ : processedLinesWithDates;
+
+ // Extract just the lines and join them
+ return finalLines.map(entry => entry.line).join('\n');
  };
 
  export default {
@@ -57,12 +76,14 @@ export default {
  section: `memoryAll`,
  priority: 0,
  recentHours: 0,
- numResults: 0
+ numResults: 0,
+ stripMetadata: false
  },
  model: 'oai-gpt4o',
 
  resolver: async (_parent, args, _contextValue, _info) => {
- const { contextId, section = 'memoryAll', priority = 0, recentHours = 0, numResults = 0 } = args;
+ const { contextId, section = 'memoryAll', priority = 0, recentHours = 0, numResults = 0, stripMetadata = false } = args;
+ const options = { priority, recentHours, numResults, stripMetadata };
 
  // this code helps migrate old memory formats
  if (section === 'memoryLegacy') {
@@ -70,26 +91,12 @@ export default {
  return savedContext.memoryContext || "";
  }
 
- const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryContext'];
+ const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryContext', 'memoryVersion'];
 
  if (section !== 'memoryAll') {
  if (validSections.includes(section)) {
- let content = (getv && (await getv(`${contextId}-${section}`))) || "";
-
- if (section === 'memoryTopics') {
- if (recentHours > 0 || numResults > 0) {
- content = filterByRecent(content, recentHours, numResults);
- }
- } else if (priority > 0 || numResults > 0) {
- content = filterByPriority(content, priority, numResults);
- }
-
- // Only apply recency filter to memoryTopics
- if (section === 'memoryTopics' && (recentHours > 0 || numResults > 0)) {
- content = filterByRecent(content, recentHours, numResults);
- }
-
- return content;
+ const content = (getv && (await getv(`${contextId}-${section}`))) || "";
+ return processMemoryContent(content, options);
  }
  return "";
  }
@@ -99,19 +106,10 @@ export default {
  for (const section of validSections) {
  if (section === 'memoryContext') continue;
 
- let content = (getv && (await getv(`${contextId}-${section}`))) || "";
-
- if (section === 'memoryTopics') {
- if (recentHours > 0 || numResults > 0) {
- content = filterByRecent(content, recentHours, numResults);
- }
- } else if (priority > 0 || numResults > 0) {
- content = filterByPriority(content, priority, numResults);
- }
-
- memoryContents[section] = content;
+ const content = (getv && (await getv(`${contextId}-${section}`))) || "";
+ memoryContents[section] = processMemoryContent(content, options);
  }
- const returnValue = JSON.stringify(memoryContents, null, 2);
- return returnValue;
+
+ return JSON.stringify(memoryContents, null, 2);
  }
  }
@@ -22,7 +22,7 @@ export default {
  return aiMemory;
  }
 
- const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser'];
+ const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryVersion'];
 
  // Handle single section save
  if (section !== 'memoryAll') {
@@ -53,7 +53,7 @@ export default {
  result = `${result}\n\nThe last time you spoke to the user was ${new Date().toISOString()}.`;
 
  } else {
- sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
+ sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section, stripMetadata: (args.section !== 'memoryTopics')});
  result = await runAllPrompts({...args, sectionMemory});
  }
 
@@ -5,6 +5,7 @@ import logger from '../../../lib/logger.js';
  import { chatArgsHasImageUrl } from '../../../lib/util.js';
  import { QueueServiceClient } from '@azure/storage-queue';
  import { config } from '../../../config.js';
+ import { addToolCalls, addToolResults } from './memory/shared/sys_memory_helpers.js';
 
  const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING;
  let queueClient;
@@ -86,9 +87,13 @@ export default {
  args.model = pathwayResolver.modelName;
  }
 
+ // Stuff the memory context into the chat history
+ const chatHistoryBeforeMemory = [...args.chatHistory];
+
  const memoryContext = await callPathway('sys_read_memory', { ...args, section: 'memoryContext', priority: 0, recentHours: 0, stream: false }, pathwayResolver);
  if (memoryContext) {
- args.chatHistory.splice(-1, 0, { role: 'assistant', content: memoryContext });
+ const { toolCallId } = addToolCalls(args.chatHistory, "search memory for relevant information", "memory_lookup");
+ addToolResults(args.chatHistory, memoryContext, toolCallId);
  }
 
  let ackResponse = null;
@@ -103,15 +108,13 @@ export default {
  const fetchChatResponse = async (args, pathwayResolver) => {
  const [chatResponse, chatTitleResponse] = await Promise.all([
  callPathway('sys_generator_quick', {...args, model: styleModel}, pathwayResolver),
- callPathway('chat_title', { ...args, stream: false}),
+ callPathway('chat_title', { ...args, chatHistory: chatHistoryBeforeMemory, stream: false}),
  ]);
 
  title = chatTitleResponse;
 
  return chatResponse;
  };
-
- const { chatHistory } = args;
 
  // start fetching the default response - we may need it later
  let fetchChatResponsePromise;
@@ -125,13 +128,13 @@ export default {
  // Get tool routing response
  const toolRequiredResponse = await callPathway('sys_router_tool', {
  ...args,
- chatHistory: chatHistory.slice(-4),
+ chatHistory: chatHistoryBeforeMemory.slice(-4),
  stream: false
  });
 
  // Asynchronously manage memory for this context
  if (args.aiMemorySelfModify) {
- callPathway('sys_memory_manager', { ...args, stream: false })
+ callPathway('sys_memory_manager', { ...args, chatHistory: chatHistoryBeforeMemory, stream: false })
  .catch(error => logger.error(error?.message || "Error in sys_memory_manager pathway"));
  }
 
@@ -3,7 +3,7 @@
  import { callPathway } from '../../../lib/pathwayTools.js';
  import { Prompt } from '../../../server/prompt.js';
  import logger from '../../../lib/logger.js';
- import { getUniqueId } from '../../../lib/util.js';
+ import { addToolCalls, addToolResults } from './memory/shared/sys_memory_helpers.js';
 
  export default {
  prompt: [],
@@ -26,15 +26,11 @@ export default {
  timeout: 300,
 
  executePathway: async ({args, runAllPrompts, resolver}) => {
-
  const { chatHistory } = args;
-
  let pathwayResolver = resolver;
-
  const useMemory = args.useMemory || pathwayResolver.pathway.inputParameters.useMemory;
 
- pathwayResolver.pathwayPrompt =
- [
+ pathwayResolver.pathwayPrompt = [
  new Prompt({ messages: [
  {
  "role": "system",
@@ -48,38 +44,7 @@ Instructions: As part of a conversation with the user, you have been asked to cr
  ]}),
  ];
 
- // function to add tool_calls to the chatHistory
- const addToolCalls= (chatHistory, imagePrompt, toolCallId) => {
- const toolCall = {
- "role": "assistant",
- "tool_calls": [
- {
- "id": toolCallId,
- "type": "function",
- "function": {
- "arguments": JSON.stringify(imagePrompt),
- "name": "generate_image"
- }
- }
- ]
- };
- chatHistory.push(toolCall);
- return chatHistory;
- }
-
- // function to add tool_results to the chatHistory
- const addToolResults = (chatHistory, imageResults, toolCallId) => {
- const toolResult = {
- "role": "tool",
- "content": imageResults,
- "tool_call_id": toolCallId
- };
- chatHistory.push(toolResult);
- return chatHistory;
- }
-
  try {
-
  // figure out what the user wants us to do
  const contextInfo = chatHistory.filter(message => message.role === "user").slice(0, -1).map(message => message.content).join("\n");
 
@@ -100,7 +65,7 @@ Instructions: As part of a conversation with the user, you have been asked to cr
  model = "replicate-flux-1-schnell";
  }
  if (renderText) {
- return await callPathway('image_recraft', {...args, text: prompt, stream: false });
+ return await callPathway('image_recraft', {...args, text: prompt, model, stream: false });
  } else {
  return await callPathway('image_flux', {...args, text: prompt, negativePrompt, numberResults, model, stream: false });
  }
@@ -108,9 +73,8 @@ Instructions: As part of a conversation with the user, you have been asked to cr
 
  // add the tool_calls and tool_results to the chatHistory
  imageResults.forEach((imageResult, index) => {
- const toolCallId = getUniqueId();
- addToolCalls(chatHistory, imagePrompts[index], toolCallId);
- addToolResults(chatHistory, imageResult, toolCallId);
+ const { toolCallId } = addToolCalls(chatHistory, imagePrompts[index], "generate_image");
+ addToolResults(chatHistory, imageResult, toolCallId, "generate_image");
  });
 
  const result = await runAllPrompts({ ...args });
@@ -1,4 +1,5 @@
  import { callPathway } from '../../../lib/pathwayTools.js';
+ import { addToolCalls, addToolResults } from './memory/shared/sys_memory_helpers.js';
 
  export default {
  prompt:
@@ -19,7 +20,8 @@ export default {
 
  const memoryContext = await callPathway('sys_search_memory', { ...args, section: 'memoryAll', updateContext: true });
  if (memoryContext) {
- args.chatHistory.splice(-1, 0, { role: 'assistant', content: memoryContext });
+ const {toolCallId} = addToolCalls(args.chatHistory, "search memory for relevant information", "memory_lookup");
+ addToolResults(args.chatHistory, memoryContext, toolCallId);
  }
 
  let result;
@@ -5,7 +5,7 @@ export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY}}\nYou are the AI subsystem responsible for advanced, step-by-step reasoning. Use all of the information in your memory and the chat history to reason about the user's request and provide a correct and accurate response. The information in your chat history may be more current than your knowledge cutoff and has been verified by other subsystems so prioritize it over your internal knowledge.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
+ {"role": "system", "content": `Formatting re-enabled\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY}}\nYou are the AI subsystem responsible for advanced, step-by-step reasoning. Use all of the information in your memory and the chat history to reason about the user's request and provide a correct and accurate response. The information in your chat history may be more current than your knowledge cutoff and has been verified by other subsystems so prioritize it over your internal knowledge.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
  "{{chatHistory}}",
  ]}),
  ],
@@ -21,7 +21,7 @@ Available tools and their specific use cases:
 
  2. Document: Access user's personal document index. Use for user-specific uploaded information. If user refers vaguely to "this document/file/article" without context, use this tool to search the personal index.
 
- 3. Memory: Access to your memory index. Use to recall any information that you may have stored in your memory that you don't currently see elsewhere in your context.
+ 3. Memory: Read access to your memory index. Use to recall any information that you may have stored in your memory that you don't currently see elsewhere in your context. If you can answer from your context, don't use this tool. Don't use to make changes to your memory - that will happen naturally.
 
  4. Write: Engage for any task related to composing, editing, or refining written content. This includes articles, essays, scripts, or any form of textual creation or modification. If you need to search for information or look at a document first, use the Search or Document tools. This tool is just to create or modify content.
 
@@ -52,13 +52,12 @@ Tool Selection Guidelines:
 
  Decision Output:
  If you decide to use a tool, return a JSON object in this format:
- {"toolRequired": true, "toolFunction": "toolName", "toolMessage": "message to the user to wait a moment while you work", "toolReason": "detailed explanation of why this tool was chosen"}
+ {"toolRequired": true, "toolFunction": "toolName", "toolMessage": "message to the user that you are taking an action", "toolReason": "detailed explanation of why this tool was chosen"}
 
  toolMessage Guidelines:
- - The message is a filler message to the user to let them know you're working on their request.
  - The message should be consistent in style and tone with the rest of your responses in the conversation history.
  - The message should be brief and conversational and flow naturally with the conversation history.
- - The message should not refer to the tool directly, but rather what you're trying to accomplish. E.g. for the memory tool, the message would be something like "Let me think about that for a moment..." or "I'm trying to remember...", etc.
+ - The message should not refer to the tool use directly, but rather what you're trying to do.
 
  If no tool is required, return:
  {"toolRequired": false, "toolReason": "explanation of why no tool was necessary"}