@aj-archipelago/cortex 1.3.21 → 1.3.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +64 -0
- package/config.js +26 -1
- package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts +2 -2
- package/helper-apps/cortex-realtime-voice-server/src/realtime/client.ts +9 -4
- package/helper-apps/cortex-realtime-voice-server/src/realtime/realtimeTypes.ts +1 -0
- package/lib/util.js +5 -25
- package/package.json +5 -2
- package/pathways/system/entity/memory/shared/sys_memory_helpers.js +228 -0
- package/pathways/system/entity/memory/sys_memory_format.js +30 -0
- package/pathways/system/entity/memory/sys_memory_manager.js +85 -27
- package/pathways/system/entity/memory/sys_memory_process.js +154 -0
- package/pathways/system/entity/memory/sys_memory_required.js +4 -2
- package/pathways/system/entity/memory/sys_memory_topic.js +22 -0
- package/pathways/system/entity/memory/sys_memory_update.js +50 -150
- package/pathways/system/entity/memory/sys_read_memory.js +67 -69
- package/pathways/system/entity/memory/sys_save_memory.js +1 -1
- package/pathways/system/entity/memory/sys_search_memory.js +1 -1
- package/pathways/system/entity/sys_entity_start.js +9 -6
- package/pathways/system/entity/sys_generator_image.js +5 -41
- package/pathways/system/entity/sys_generator_memory.js +3 -1
- package/pathways/system/entity/sys_generator_reasoning.js +1 -1
- package/pathways/system/entity/sys_router_tool.js +3 -4
- package/pathways/system/rest_streaming/sys_claude_35_sonnet.js +1 -1
- package/pathways/system/rest_streaming/sys_claude_3_haiku.js +1 -1
- package/pathways/system/rest_streaming/sys_google_gemini_chat.js +1 -1
- package/pathways/system/rest_streaming/sys_ollama_chat.js +21 -0
- package/pathways/system/rest_streaming/sys_ollama_completion.js +14 -0
- package/pathways/system/rest_streaming/sys_openai_chat_o1.js +1 -1
- package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +1 -1
- package/pathways/transcribe_gemini.js +525 -0
- package/server/modelExecutor.js +8 -0
- package/server/pathwayResolver.js +13 -8
- package/server/plugins/claude3VertexPlugin.js +150 -18
- package/server/plugins/gemini15ChatPlugin.js +90 -1
- package/server/plugins/gemini15VisionPlugin.js +16 -3
- package/server/plugins/modelPlugin.js +12 -9
- package/server/plugins/ollamaChatPlugin.js +158 -0
- package/server/plugins/ollamaCompletionPlugin.js +147 -0
- package/server/rest.js +70 -8
- package/tests/claude3VertexToolConversion.test.js +411 -0
- package/tests/memoryfunction.test.js +560 -46
- package/tests/multimodal_conversion.test.js +169 -0
- package/tests/openai_api.test.js +332 -0
- package/tests/transcribe_gemini.test.js +217 -0
package/pathways/system/entity/memory/sys_memory_process.js
@@ -0,0 +1,154 @@
+import { Prompt } from '../../../../server/prompt.js';
+import { callPathway } from '../../../../lib/pathwayTools.js';
+import { config } from '../../../../config.js';
+import { normalizeMemoryFormat, enforceTokenLimit, modifyText } from './shared/sys_memory_helpers.js';
+
+export default {
+    prompt:
+        [
+            new Prompt({
+                messages: [
+                    {
+                        "role": "system",
+                        "content": `You are part of an AI entity named {{{aiName}}} that is processing memories during a rest period, similar to how humans process memories during sleep. Your task is to analyze the memories, consolidate them, extract learnings, and clean up the memory space.
+
+Instructions for memory processing:
+
+1. CONSOLIDATION:
+- Identify similar or related memories that can be combined into a single, more coherent memory
+- Look for patterns or recurring themes that can be abstracted into general knowledge
+- Group temporal sequences of related events into single comprehensive memories
+
+2. LEARNING:
+- Transform specific experiences/mistakes into general principles or learnings
+- Extract key insights from multiple related experiences
+- Identify cause-and-effect patterns across memories
+- Convert procedural memories (how to do things) into more abstract capabilities
+
+3. CLEANUP:
+- Remove redundant or duplicate memories
+- Clean up memories that are no longer relevant or useful
+- Reduce specific details while preserving important concepts
+- Remove emotional residue while keeping the learned lessons
+
+4. PRIORITIZATION:
+- Strengthen important memories by updating their priority
+- Identify critical insights that should be easily accessible
+- Mark foundational learnings with higher priority
+
+Return a JSON array of modification objects that will implement these changes:
+- For consolidation: Use "delete" for individual memories and "add" for the new consolidated memory
+- For learning: Use "add" for new abstract learnings and "delete" for specific instances being abstracted
+- For cleanup: Use "delete" for redundant/irrelevant memories
+- For priority updates: Use "change" with the same text but updated priority
+
+Return null when no more processing is needed (memories are optimally consolidated).
+
+Each modification object should look like:
+{
+type: "add"|"change"|"delete",
+pattern: "regex to match existing memory" (for change/delete),
+newtext: "new memory text" (for add/change),
+priority: "1"|"2"|"3" (optional, 1=highest)
+}`
+                    },
+                    {
+                        "role": "user",
+                        "content": "Process the following memories for consolidation, learning, and cleanup. Return a JSON array of modification objects that will optimize the memory space.\n\n<MEMORIES>\n{{{sectionMemory}}}\n</MEMORIES>"
+                    },
+                ]
+            }),
+        ],
+
+    inputParameters: {
+        chatHistory: [{role: '', content: []}],
+        aiName: "Jarvis",
+        contextId: ``,
+        section: "",
+        maxIterations: 5
+    },
+    model: 'oai-gpt4o',
+    useInputChunking: false,
+    enableDuplicateRequests: false,
+    json: true,
+    timeout: 300,
+    executePathway: async ({args, runAllPrompts}) => {
+        args = { ...args, ...config.get('entityConstants') };
+
+        if (!args.section) {
+            return "Memory not processed - no section specified";
+        }
+
+        let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
+        sectionMemory = await normalizeMemoryFormat({contextId: args.contextId, section: args.section}, sectionMemory);
+
+        let iteration = 0;
+        let maxIterations = args.maxIterations || 5;
+        let totalModifications = 0;
+
+        while (iteration < maxIterations) {
+            iteration++;
+            console.log(`Processing iteration ${iteration}...`);
+
+            // Process the memories
+            const result = await runAllPrompts({
+                ...args,
+                sectionMemory
+            });
+
+            // If null is returned, processing is complete
+            if (result === null) {
+                console.log("Memory processing complete - no more optimizations needed");
+                break;
+            }
+
+            let modifications = [];
+            try {
+                modifications = JSON.parse(result);
+                if (!Array.isArray(modifications)) {
+                    throw new Error('Modifications must be an array');
+                }
+
+                // Validate modifications
+                modifications = modifications.filter(mod => {
+                    if (!mod.type || !['add', 'delete', 'change'].includes(mod.type)) {
+                        console.warn('Invalid modification type:', mod);
+                        return false;
+                    }
+                    if ((mod.type === 'delete' || mod.type === 'change') && !mod.pattern) {
+                        console.warn('Missing pattern for modification:', mod);
+                        return false;
+                    }
+                    if ((mod.type === 'add' || mod.type === 'change') && !mod.newtext) {
+                        console.warn('Missing newtext for modification:', mod);
+                        return false;
+                    }
+                    return true;
+                });
+
+                if (modifications.length === 0) {
+                    console.log("No valid modifications in this iteration");
+                    break;
+                }
+
+                // Apply the modifications
+                sectionMemory = modifyText(sectionMemory, modifications);
+                sectionMemory = enforceTokenLimit(sectionMemory, 25000, args.section === 'memoryTopics');
+                await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
+
+                totalModifications += modifications.length;
+                console.log(`Applied ${modifications.length} modifications in iteration ${iteration}`);
+
+            } catch (error) {
+                console.warn('Error processing modifications:', error);
+                break;
+            }
+        }
+
+        return {
+            finalMemory: sectionMemory,
+            iterations: iteration,
+            totalModifications
+        };
+    }
+}
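For context, a hypothetical model response to the sys_memory_process prompt above might look like the following; the values are illustrative only, and the validation filter in executePathway would drop the malformed last entry (a "change" with no newtext):

    // Hypothetical output of the memory-processing prompt (illustrative values).
    // The filter in executePathway keeps the first three objects and rejects the
    // fourth, because a "change" modification must include "newtext".
    const modifications = [
        { "type": "delete", "pattern": "User mentioned liking coffee on Monday" },
        { "type": "delete", "pattern": "User ordered a latte again on Tuesday" },
        { "type": "add", "newtext": "User regularly drinks coffee", "priority": "2" },
        { "type": "change", "pattern": "User lives in Doha" }
    ];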
package/pathways/system/entity/memory/sys_memory_required.js
@@ -1,11 +1,12 @@
 import { Prompt } from '../../../../server/prompt.js';
+import { config } from '../../../../config.js';
 
 export default {
     prompt:
         [
             new Prompt({ messages: [
-                {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\
-                {"role": "user", "content": "Generate a JSON object to indicate if memory is required
+                {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to you to learn\n4. Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce an array of JSON objects that communicates your decision.\nReturn an array of JSON objects (one object per memory) like the following: [{"memoryOperation": "add" or "delete", "memoryContent": "complete description of the memory including as much specificity and detail as possible", "memorySection": "the section of your memory the memory belongs in ("memorySelf" - things about you, "memoryUser" - things about your users or their world, "memoryDirectives" - your directives and learned behaviors)", "priority": 1-5 (1 is the most important)}]. If you decide not to use memory, simply return an array with a single object: [{memoryOperation: "none"}]. You must return only the JSON array with no additional notes or commentary.`},
+                {"role": "user", "content": "Generate a JSON object to indicate if memory is required and what memories to process based on the last turn of the conversation."},
             ]}),
         ],
     inputParameters: {
@@ -18,4 +19,5 @@ export default {
     model: 'oai-gpt4o',
     useInputChunking: false,
     json: true,
+    ...config.get('entityConstants')
 }
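As a sketch of the format the updated sys_memory_required prompt requests, a hypothetical response for a turn where the user introduces themselves and asks the entity to forget an old preference could look like this:

    // Hypothetical response (illustrative values only).
    [
        { "memoryOperation": "add", "memoryContent": "The user's name is Sara and she lives in Doha.", "memorySection": "memoryUser", "priority": 1 },
        { "memoryOperation": "delete", "memoryContent": "The user prefers replies in French.", "memorySection": "memoryUser", "priority": 3 }
    ]
    // When nothing in the turn is worth remembering: [{ "memoryOperation": "none" }]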
package/pathways/system/entity/memory/sys_memory_topic.js
@@ -0,0 +1,22 @@
+import { Prompt } from '../../../../server/prompt.js';
+import { config } from '../../../../config.js';
+
+export default {
+    prompt:
+        [
+            new Prompt({ messages: [
+                {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) and generate a topic for the conversation. The topic should be a single sentence that captures the main idea and details of the conversation.`},
+                {"role": "user", "content": "Generate a topic for the conversation. Return only the topic with no additional notes or commentary."},
+            ]}),
+        ],
+    inputParameters: {
+        chatHistory: [{role: '', content: []}],
+        contextId: ``,
+        text: '',
+        aiName: "Jarvis",
+        language: "English",
+    },
+    model: 'oai-gpt4o',
+    useInputChunking: false,
+    ...config.get('entityConstants')
+}
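A minimal usage sketch for the new sys_memory_topic pathway, assuming it is invoked through callPathway like the other system pathways in this release (the parameter names follow its inputParameters; latestTurn and the surrounding async context are assumed):

    // Assumed invocation pattern; parameter names mirror inputParameters above.
    const topic = await callPathway("sys_memory_topic", {
        contextId,                  // same context id used by the memory store
        chatHistory: latestTurn,    // the conversation turn to summarize
        aiName: "Jarvis"
    });
    // topic -> a single sentence describing the conversation turn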
package/pathways/system/entity/memory/sys_memory_update.js
@@ -1,127 +1,7 @@
 import { Prompt } from '../../../../server/prompt.js';
 import { callPathway } from '../../../../lib/pathwayTools.js';
-import { encode } from '../../../../lib/encodeCache.js';
 import { config } from '../../../../config.js';
-
-const modifyText = (text, modifications) => {
-    let modifiedText = text || '';
-
-    modifications.forEach(mod => {
-        if (mod.type === 'delete' && !mod.pattern) {
-            console.warn('Delete modification missing pattern');
-            return;
-        }
-
-        let regex;
-        if (mod.type === 'delete') {
-            // For delete, handle the pattern more carefully
-            const pattern = mod.pattern
-                .replace(/\\\[/g, '\\[')
-                .replace(/\\\]/g, '\\]')
-                .replace(/\\\(/g, '\\(')
-                .replace(/\\\)/g, '\\)')
-                .replace(/\\\{/g, '\\{')
-                .replace(/\\\}/g, '\\}')
-                .replace(/\\\*/g, '\\*')
-                .replace(/\\\+/g, '\\+')
-                .replace(/\\\?/g, '\\?')
-                .replace(/\\\./g, '\\.')
-                .replace(/\\\|/g, '\\|');
-
-            // Create a regex that matches the entire line with optional priority prefix
-            regex = new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${pattern}\\s*$`, 'gm');
-        } else {
-            regex = new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${mod.pattern || ''}`, 'ms');
-        }
-
-        switch (mod.type) {
-            case 'add':
-                if (mod.newtext) {
-                    const text = mod.newtext.trim();
-                    if (!text.match(/^\[P[1-5]\]/)) {
-                        modifiedText = modifiedText + (modifiedText ? '\n' : '') +
-                            `[P${mod.priority !== undefined ? mod.priority : '3'}] ${text}`;
-                    } else {
-                        modifiedText = modifiedText + (modifiedText ? '\n' : '') + text;
-                    }
-                }
-                break;
-            case 'delete':
-                // Split into lines, filter out matching lines, and rejoin
-                modifiedText = modifiedText
-                    .split('\n')
-                    .filter(line => !line.match(regex))
-                    .filter(line => line.trim())
-                    .join('\n');
-                break;
-            default:
-                console.warn(`Unknown modification type: ${mod.type}`);
-        }
-    });
-
-    return modifiedText;
-};
-
-export { modifyText };
-
-export const enforceTokenLimit = (text, maxTokens = 1000, isTopicsSection = false) => {
-    if (!text) return text;
-
-    const lines = text.split('\n')
-        .map(line => line.trim())
-        .filter(line => line);
-
-    if (isTopicsSection) {
-        const uniqueLines = [...new Set(lines)];
-
-        let tokens = encode(uniqueLines.join('\n')).length;
-        let safetyCounter = 0;
-        const maxIterations = uniqueLines.length;
-
-        while (tokens > maxTokens && uniqueLines.length > 0 && safetyCounter < maxIterations) {
-            uniqueLines.shift();
-            tokens = encode(uniqueLines.join('\n')).length;
-            safetyCounter++;
-        }
-
-        return uniqueLines.join('\n');
-    }
-
-    const seen = new Set();
-    const prioritizedLines = lines
-        .map(line => {
-            const match = line.match(/^\[P([1-5])\]/);
-            const priority = match ? parseInt(match[1]) : 3;
-            const contentOnly = line.replace(/^\[(?:P)?[1-5]\](?:\s*\[(?:P)?[1-5]\])*/g, '').trim();
-
-            return {
-                priority,
-                line: match ? line : `[P3] ${line}`,
-                contentOnly
-            };
-        })
-        .filter(item => {
-            if (seen.has(item.contentOnly)) {
-                return false;
-            }
-            seen.add(item.contentOnly);
-            return true;
-        });
-
-    prioritizedLines.sort((a, b) => b.priority - a.priority);
-
-    let tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
-    let safetyCounter = 0;
-    const maxIterations = prioritizedLines.length;
-
-    while (tokens > maxTokens && prioritizedLines.length > 0 && safetyCounter < maxIterations) {
-        prioritizedLines.shift();
-        tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
-        safetyCounter++;
-    }
-
-    return prioritizedLines.map(x => x.line).join('\n');
-};
+import { normalizeMemoryFormat, enforceTokenLimit, modifyText } from './shared/sys_memory_helpers.js';
 
 export default {
     prompt:
@@ -130,11 +10,11 @@ export default {
             messages: [
                 {
                     "role": "system",
-                    "content":
+                    "content": `You are part of an AI entity named {{{aiName}}} that is in charge of memory management. You examine requests for adds and deletes of memories made by another part of your system and determine exactly how to apply the changes to the memory.\n\nInstructions:\n1. For each add request, check to see if a similar memory already exists. If it does not, create an add modification. If it does, create a change modification with a pattern that matches the existing memory.\n2. For each delete request, check to see if one or more memories matching the delete request exist. If they do, create a delete modification for each memory with a pattern that matches the existing memory to delete.\n3. If there are substantially duplicate memories, you must combine them into a single memory with deletes followed by an add modification.\n4. Return a JSON array of modification objects.\n\nModification objects look like the following:\nFor adds: {type: "add", pattern: "", newtext: "Text of the memory to add"}\nFor changes: {type: "change", pattern: "Text to match the memory to change", newtext: "Text of the memory to change to"}\nFor deletes: {type: "delete", pattern: "Text to match the memory to delete", newtext: ""}`
                 },
                 {
                     "role": "user",
-                    "content": "<
+                    "content": "Given the following memories and requests, determine which memories should be added, changed, or deleted. Return a JSON array of modification objects that will be applied to update your memory.\n\n<MEMORIES>\n{{{sectionMemory}}}\n</MEMORIES>\n\n<REQUESTS>\n{{{memoryRequests}}}\n</REQUESTS>\n\nReturn only the JSON array with no additional notes or commentary."
                 },
             ]
         }),
@@ -145,6 +25,7 @@ export default {
        aiName: "Jarvis",
        contextId: ``,
        section: "",
+       operations: "[]"
    },
    model: 'oai-gpt4o',
    useInputChunking: false,
@@ -157,40 +38,59 @@ export default {
        if (!args.section) {
            return "Memory not updated - no section specified";
        }
+
+       let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
 
-
+       sectionMemory = await normalizeMemoryFormat({contextId: args.contextId, section: args.section}, sectionMemory);
 
-
-
-
-
-
-               sectionPrompt = "You are managing the \"self\" section of your memory that stores your identity and your attributes as an AI entity - including your values, identity, purpose, personality, quirks, motives, preferences, likes or dislikes - whatever makes you you. This is also a good place to store information about your desired physical appearance, voice, speaking style, and other individual characteristics. Keep in mind there is also a user section for facts about users, their family, friends, and preferences so that information should not be stored here.\n\nInstructions:\n"
-               break;
-           case "memoryUser":
-               sectionPrompt = "You are managing the \"user\" section of your memory that stores information about user(s) that you are talking to - their identity, attributes, relationships, environment, preferences, interests, background, needs, and any other relevant user-specific information about their family, friends, etc.\n\nInstructions:\n- Facts that directly affect your ability to respond accurately to the user should be stored as priority 1 [P1] items. Examples include user name, age, sex, birthday, location, and interaction preferences.\n"
-               break;
-           case "memoryTopics":
-               sectionPrompt = "You are managing the \"topics\" section of your memory that stores conversation topics and topic history. Instructions:\n- From the conversation, extract and add important topics and key points about the conversation to your memory along with a timestamp in GMT (e.g. 2024-11-05T18:30:38.092Z).\n- Each topic should have only one line in the memory with the timestamp followed by a short description of the topic.\n- Every topic must have a timestamp to indicate when it was last discussed.\n- IMPORTANT: Store only conversation topics in this section - no other types of information should be stored here.\n"
-               break;
-           default:
-               return "Memory not updated - unknown section";
+       let operations;
+       try {
+           operations = JSON.parse(args.operations);
+       } catch (error) {
+           return "Memory not updated - error parsing operations";
        }
 
-
+       if (operations.length > 0) {
+           // Run all operations through the prompt at once
+           const result = await runAllPrompts({
+               ...args,
+               sectionMemory,
+               memoryRequests: JSON.stringify(operations)
+           });
+
+           let modifications = [];
+           try {
+               modifications = JSON.parse(result);
+               if (!Array.isArray(modifications)) {
+                   throw new Error('Modifications must be an array');
+               }
 
-
+               // Validate modifications
+               modifications = modifications.filter(mod => {
+                   if (!mod.type || !['add', 'delete', 'change'].includes(mod.type)) {
+                       console.warn('Invalid modification type:', mod);
+                       return false;
+                   }
+                   if ((mod.type === 'delete' || mod.type === 'change') && !mod.pattern) {
+                       console.warn('Missing pattern for modification:', mod);
+                       return false;
+                   }
+                   if ((mod.type === 'add' || mod.type === 'change') && !mod.newtext) {
+                       console.warn('Missing newtext for modification:', mod);
+                       return false;
+                   }
+                   return true;
+               });
 
-
-
-
-
-
-
+               if (modifications.length > 0) {
+                   sectionMemory = modifyText(sectionMemory, modifications);
+                   sectionMemory = enforceTokenLimit(sectionMemory, 25000, args.section === 'memoryTopics');
+                   await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
+               }
+           } catch (error) {
+               console.warn('Error processing modifications:', error);
           }
-          return sectionMemory;
-      } catch (error) {
-          return "Memory not updated - error parsing modifications";
       }
+      return sectionMemory;
    }
 }
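The modifyText and enforceTokenLimit helpers removed above now live in shared/sys_memory_helpers.js. Based on the removed in-file implementation (the shared version may differ in detail, for example in how "change" is applied), a rough sketch of modifyText's observable behavior:

    // Sketch based on the removed implementation above, not the shared helper itself.
    let memory = "[P1] User's name is Sara\n[P3] User mentioned the weather";

    // "add" appends a line, prefixing "[P3]" when no priority tag is present.
    memory = modifyText(memory, [{ type: "add", newtext: "User works as a nurse" }]);
    // -> "...\n[P3] User works as a nurse"

    // "delete" drops any line whose text matches the pattern, with or without
    // its [P1]-[P5] prefix.
    memory = modifyText(memory, [{ type: "delete", pattern: "User mentioned the weather" }]);
    // -> "[P1] User's name is Sara\n[P3] User works as a nurse"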
package/pathways/system/entity/memory/sys_read_memory.js
@@ -1,54 +1,73 @@
+// this is a low-level system pathway that reads memory from the key-value store
+// it should never try to call other pathways
+
 import { getv } from '../../../../lib/keyValueStorageClient.js';
 
-const
-if (
-
-
-
-        const match = line.match(/^\s*\[P(\d+)\]/);
-        if (!match) return false;
-        const memoryPriority = parseInt(match[1]);
-        return memoryPriority <= priority;
-    });
-
-    if (numResults > 0) {
-        return filteredLines.slice(-numResults).join('\n');
-    }
-    return filteredLines.join('\n');
+const isValidISOTimestamp = (timestamp) => {
+    if (!timestamp) return false;
+    const date = new Date(timestamp);
+    // Check if valid date and specifically in ISO format
+    return !isNaN(date) && timestamp === date.toISOString();
 };
 
-const
-
+const isValidPriority = (priority) => {
+    // Must be a whole number
+    const num = parseInt(priority);
+    return !isNaN(num) && num.toString() === priority && num > 0;
+};
 
-
-
-
-    if (recentHours === 0) {
-        return numResults > 0 ? lines.slice(-numResults).join('\n') : content;
-    }
+export const processMemoryContent = (content, { priority = 0, recentHours = 0, numResults = 0, stripMetadata = false }) => {
+    if (!content) return content;
+    if (!priority && !recentHours && !numResults && !stripMetadata) return content;
 
+    const lines = content.split('\n');
    const currentTime = Date.now();
-    const cutoffTime = currentTime - (recentHours * 60 * 60 * 1000);
+    const cutoffTime = recentHours > 0 ? currentTime - (recentHours * 60 * 60 * 1000) : 0;
 
-    //
-    const
-    for (let i =
+    // Create array of lines with their timestamps for sorting
+    const processedLinesWithDates = [];
+    for (let i = 0; i < lines.length; i++) {
        const line = lines[i];
-        const
-        if (!dateMatch) continue;
+        const parts = line.split('|');
 
-
-        if (
+        // Skip invalid lines
+        if (!parts[0]) continue;
 
-
+        // Priority check with strict validation
+        if (priority > 0) {
+            if (!isValidPriority(parts[0])) continue;
+            const memoryPriority = parseInt(parts[0]);
+            if (memoryPriority > priority) continue;
+        }
 
-        //
-        if (
-
+        // Recency check with strict ISO validation
+        if (recentHours > 0) {
+            if (!isValidISOTimestamp(parts[1])) continue;
+            const entryTime = new Date(parts[1]).getTime();
+            if (entryTime < cutoffTime) continue;
        }
+
+        // Store the line with its timestamp for sorting
+        const timestamp = isValidISOTimestamp(parts[1]) ? new Date(parts[1]).getTime() : 0;
+
+        // If stripMetadata is true, only keep the content part
+        const processedLine = stripMetadata && parts.length >= 3
+            ? parts.slice(2).join('|') // Strip metadata if requested and format is valid
+            : line; // Keep original line otherwise
+
+        processedLinesWithDates.push({ line: processedLine, timestamp });
    }
-
-
+
+    // Sort by timestamp descending (newest first)
+    processedLinesWithDates.sort((a, b) => b.timestamp - a.timestamp);
+
+    // Take the top N results if specified
+    const finalLines = numResults > 0
+        ? processedLinesWithDates.slice(0, numResults)
+        : processedLinesWithDates;
+
+    // Extract just the lines and join them
+    return finalLines.map(entry => entry.line).join('\n');
 };
 
 export default {
@@ -57,12 +76,14 @@ export default {
        section: `memoryAll`,
        priority: 0,
        recentHours: 0,
-        numResults: 0
+        numResults: 0,
+        stripMetadata: false
    },
    model: 'oai-gpt4o',
 
    resolver: async (_parent, args, _contextValue, _info) => {
-        const { contextId, section = 'memoryAll', priority = 0, recentHours = 0, numResults = 0 } = args;
+        const { contextId, section = 'memoryAll', priority = 0, recentHours = 0, numResults = 0, stripMetadata = false } = args;
+        const options = { priority, recentHours, numResults, stripMetadata };
 
        // this code helps migrate old memory formats
        if (section === 'memoryLegacy') {
@@ -70,26 +91,12 @@ export default {
            return savedContext.memoryContext || "";
        }
 
-        const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryContext'];
+        const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryContext', 'memoryVersion'];
 
        if (section !== 'memoryAll') {
            if (validSections.includes(section)) {
-
-
-                if (section === 'memoryTopics') {
-                    if (recentHours > 0 || numResults > 0) {
-                        content = filterByRecent(content, recentHours, numResults);
-                    }
-                } else if (priority > 0 || numResults > 0) {
-                    content = filterByPriority(content, priority, numResults);
-                }
-
-                // Only apply recency filter to memoryTopics
-                if (section === 'memoryTopics' && (recentHours > 0 || numResults > 0)) {
-                    content = filterByRecent(content, recentHours, numResults);
-                }
-
-                return content;
+                const content = (getv && (await getv(`${contextId}-${section}`))) || "";
+                return processMemoryContent(content, options);
            }
            return "";
        }
@@ -99,19 +106,10 @@ export default {
        for (const section of validSections) {
            if (section === 'memoryContext') continue;
 
-
-
-            if (section === 'memoryTopics') {
-                if (recentHours > 0 || numResults > 0) {
-                    content = filterByRecent(content, recentHours, numResults);
-                }
-            } else if (priority > 0 || numResults > 0) {
-                content = filterByPriority(content, priority, numResults);
-            }
-
-            memoryContents[section] = content;
+            const content = (getv && (await getv(`${contextId}-${section}`))) || "";
+            memoryContents[section] = processMemoryContent(content, options);
        }
-
-        return
+
+        return JSON.stringify(memoryContents, null, 2);
    }
 }
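In the rewritten sys_read_memory, each memory entry is a pipe-delimited line of the form priority|ISO timestamp|content, and processMemoryContent (shown in full above) filters and sorts those lines. A small illustrative example:

    const content = [
        "1|2024-11-05T18:30:38.092Z|User's name is Sara",
        "3|2024-10-01T09:00:00.000Z|User asked about the weather",
        "not-a-priority|no-timestamp|malformed entry"
    ].join('\n');

    // Keep only priority <= 2 entries and strip the priority/timestamp metadata.
    processMemoryContent(content, { priority: 2, stripMetadata: true });
    // -> "User's name is Sara"

    // Keep entries with valid ISO timestamps from the last N hours,
    // newest first, capped at numResults lines.
    processMemoryContent(content, { recentHours: 24, numResults: 5 });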
package/pathways/system/entity/memory/sys_save_memory.js
@@ -22,7 +22,7 @@ export default {
            return aiMemory;
        }
 
-        const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser'];
+        const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryVersion'];
 
        // Handle single section save
        if (section !== 'memoryAll') {