@aj-archipelago/cortex 1.3.21 → 1.3.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +64 -0
  2. package/config.js +26 -1
  3. package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts +2 -2
  4. package/helper-apps/cortex-realtime-voice-server/src/realtime/client.ts +9 -4
  5. package/helper-apps/cortex-realtime-voice-server/src/realtime/realtimeTypes.ts +1 -0
  6. package/lib/util.js +5 -25
  7. package/package.json +5 -2
  8. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +228 -0
  9. package/pathways/system/entity/memory/sys_memory_format.js +30 -0
  10. package/pathways/system/entity/memory/sys_memory_manager.js +85 -27
  11. package/pathways/system/entity/memory/sys_memory_process.js +154 -0
  12. package/pathways/system/entity/memory/sys_memory_required.js +4 -2
  13. package/pathways/system/entity/memory/sys_memory_topic.js +22 -0
  14. package/pathways/system/entity/memory/sys_memory_update.js +50 -150
  15. package/pathways/system/entity/memory/sys_read_memory.js +67 -69
  16. package/pathways/system/entity/memory/sys_save_memory.js +1 -1
  17. package/pathways/system/entity/memory/sys_search_memory.js +1 -1
  18. package/pathways/system/entity/sys_entity_start.js +9 -6
  19. package/pathways/system/entity/sys_generator_image.js +5 -41
  20. package/pathways/system/entity/sys_generator_memory.js +3 -1
  21. package/pathways/system/entity/sys_generator_reasoning.js +1 -1
  22. package/pathways/system/entity/sys_router_tool.js +3 -4
  23. package/pathways/system/rest_streaming/sys_claude_35_sonnet.js +1 -1
  24. package/pathways/system/rest_streaming/sys_claude_3_haiku.js +1 -1
  25. package/pathways/system/rest_streaming/sys_google_gemini_chat.js +1 -1
  26. package/pathways/system/rest_streaming/sys_ollama_chat.js +21 -0
  27. package/pathways/system/rest_streaming/sys_ollama_completion.js +14 -0
  28. package/pathways/system/rest_streaming/sys_openai_chat_o1.js +1 -1
  29. package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +1 -1
  30. package/pathways/transcribe_gemini.js +525 -0
  31. package/server/modelExecutor.js +8 -0
  32. package/server/pathwayResolver.js +13 -8
  33. package/server/plugins/claude3VertexPlugin.js +150 -18
  34. package/server/plugins/gemini15ChatPlugin.js +90 -1
  35. package/server/plugins/gemini15VisionPlugin.js +16 -3
  36. package/server/plugins/modelPlugin.js +12 -9
  37. package/server/plugins/ollamaChatPlugin.js +158 -0
  38. package/server/plugins/ollamaCompletionPlugin.js +147 -0
  39. package/server/rest.js +70 -8
  40. package/tests/claude3VertexToolConversion.test.js +411 -0
  41. package/tests/memoryfunction.test.js +560 -46
  42. package/tests/multimodal_conversion.test.js +169 -0
  43. package/tests/openai_api.test.js +332 -0
  44. package/tests/transcribe_gemini.test.js +217 -0
package/README.md CHANGED
@@ -561,6 +561,70 @@ Each model configuration can include:
  }
  ```
 
+ ### API Compatibility
+
+ Cortex provides OpenAI-compatible REST endpoints that allow you to use various models through a standardized interface. When `enableRestEndpoints` is set to `true`, Cortex exposes the following endpoints:
+
+ - `/v1/models`: List available models
+ - `/v1/chat/completions`: Chat completion endpoint
+ - `/v1/completions`: Text completion endpoint
+
+ This means you can use Cortex with any client library or tool that supports the OpenAI API format. For example:
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI(
+     base_url="http://localhost:4000/v1", # Point to your Cortex server
+     api_key="your-key" # If you have configured cortexApiKeys
+ )
+
+ response = client.chat.completions.create(
+     model="gpt-4", # Or any model configured in Cortex
+     messages=[{"role": "user", "content": "Hello!"}]
+ )
+ ```
+
+ #### Ollama Integration
+
+ Cortex includes built-in support for Ollama models through its OpenAI-compatible REST interface. When `ollamaUrl` is configured in your settings, Cortex will:
+ 1. Automatically discover and expose all available Ollama models through the `/v1/models` endpoint with an "ollama-" prefix
+ 2. Route any requests using an "ollama-" prefixed model to the appropriate Ollama endpoint
+
+ To enable Ollama support, add the following to your configuration:
+
+ ```json
+ {
+   "enableRestEndpoints": true,
+   "ollamaUrl": "http://localhost:11434" // or your Ollama server URL
+ }
+ ```
+
+ You can then use any Ollama model through the standard OpenAI-compatible endpoints:
+
+ ```bash
+ # List available models (will include Ollama models with "ollama-" prefix)
+ curl http://localhost:4000/v1/models
+
+ # Use an Ollama model for chat
+ curl http://localhost:4000/v1/chat/completions \
+   -H "Content-Type: application/json" \
+   -d '{
+     "model": "ollama-llama2",
+     "messages": [{"role": "user", "content": "Hello!"}]
+   }'
+
+ # Use an Ollama model for completions
+ curl http://localhost:4000/v1/completions \
+   -H "Content-Type: application/json" \
+   -d '{
+     "model": "ollama-codellama",
+     "prompt": "Write a function that"
+   }'
+ ```
+
+ This integration allows you to seamlessly use local Ollama models alongside cloud-based models through a single, consistent interface.
+
  ### Other Configuration Properties
 
  The following properties can be configured through environment variables or the configuration file:
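
Note: the new README examples cover Python and curl; since Cortex itself is a Node.js project, an equivalent JavaScript sketch may also be useful. This uses the official `openai` npm client; the URL, key, and model name are placeholders for your own deployment:

```javascript
import OpenAI from 'openai';

// Point the standard OpenAI client at a Cortex server instead of api.openai.com.
const client = new OpenAI({
  baseURL: 'http://localhost:4000/v1', // your Cortex server
  apiKey: 'your-key', // only needed if cortexApiKeys is configured
});

const response = await client.chat.completions.create({
  model: 'gpt-4', // or any model configured in Cortex
  messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(response.choices[0].message.content);
```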
package/config.js CHANGED
@@ -85,6 +85,11 @@ var config = convict({
    default: false,
    env: 'CORTEX_ENABLE_REST'
  },
+ ollamaUrl: {
+   format: String,
+   default: 'http://127.0.0.1:11434',
+   env: 'OLLAMA_URL'
+ },
  entityConstants: {
    format: Object,
    default: {
@@ -281,7 +286,27 @@ var config = convict({
    "headers": {
      "Content-Type": "application/json"
    },
- }
+ },
+ "ollama-chat": {
+   "type": "OLLAMA-CHAT",
+   "url": "{{ollamaUrl}}/api/chat",
+   "headers": {
+     "Content-Type": "application/json"
+   },
+   "requestsPerSecond": 10,
+   "maxTokenLength": 131072,
+   "supportsStreaming": true
+ },
+ "ollama-completion": {
+   "type": "OLLAMA-COMPLETION",
+   "url": "{{ollamaUrl}}/api/generate",
+   "headers": {
+     "Content-Type": "application/json"
+   },
+   "requestsPerSecond": 10,
+   "maxTokenLength": 131072,
+   "supportsStreaming": true
+ },
  },
  env: 'CORTEX_MODELS'
},
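
Note: the new model entries reference `{{ollamaUrl}}` inside their `url` values, resolved from the `ollamaUrl` setting (or the `OLLAMA_URL` environment variable). A minimal sketch of how such placeholder substitution could work; `resolveTemplate` is a hypothetical helper, not Cortex's actual implementation:

```javascript
// Resolve {{key}} placeholders against a config object (illustrative only).
const resolveTemplate = (str, cfg) =>
  str.replace(/\{\{(\w+)\}\}/g, (_, key) => cfg[key] ?? '');

const url = resolveTemplate('{{ollamaUrl}}/api/chat', { ollamaUrl: 'http://127.0.0.1:11434' });
console.log(url); // http://127.0.0.1:11434/api/chat
```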
package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts CHANGED
@@ -24,8 +24,8 @@ query ManageMemory($contextId: String, $chatHistory: [MultiMessage], $aiName: St
  `
 
  const READ_MEMORY = `
- query ReadMemory($contextId: String, $section: String, $priority: Int, $recentHours: Int, $numResults: Int) {
-   sys_read_memory(contextId: $contextId, section: $section, priority: $priority, recentHours: $recentHours, numResults: $numResults) {
+ query ReadMemory($contextId: String, $section: String, $priority: Int, $recentHours: Int, $numResults: Int, $stripMetadata: Boolean) {
+   sys_read_memory(contextId: $contextId, section: $section, priority: $priority, recentHours: $recentHours, numResults: $numResults, stripMetadata: $stripMetadata) {
      result
      tool
      warnings
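
Note: the only change here is threading the new `stripMetadata` argument through the `READ_MEMORY` GraphQL operation. An illustrative set of variables a caller might now pass (values are placeholders; judging by the name, the flag asks `sys_read_memory` to return content without its metadata prefixes):

```javascript
// Example variables for the updated READ_MEMORY query; values are illustrative.
const variables = {
  contextId: 'ctx-123',
  section: 'memoryUser',
  numResults: 10,
  stripMetadata: true, // new flag added in this release
};
```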
package/helper-apps/cortex-realtime-voice-server/src/realtime/client.ts CHANGED
@@ -355,14 +355,19 @@ export class RealtimeVoiceClient extends EventEmitter implements TypedEmitter {
    if (!this.isConnected) {
      throw new Error('Not connected');
    }
+
+   // Create a new config object without custom_voice_id
+   const { custom_voice_id, ...filteredConfig } = {
+     ...this.sessionConfig,
+     ...sessionConfig
+   };
+
    const message = JSON.stringify({
      event_id: createId(),
      type: 'session.update',
-     session: {
-       ...this.sessionConfig,
-       ...sessionConfig,
-     },
+     session: filteredConfig,
    });
+
    // No need to log session update messages as they can be noisy
    logger.log('Sending session update message:', message);
    this.ws?.send(message);
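
Note: the fix builds the merged session config first, then uses rest destructuring to drop `custom_voice_id` before the payload is sent upstream. The same omit-one-key idiom in isolation:

```javascript
// Rest destructuring copies every property except the ones named on the left.
const session = { voice: 'alloy', custom_voice_id: 'abc-123', modalities: ['audio'] };
const { custom_voice_id, ...filtered } = session;
void custom_voice_id; // intentionally unused
console.log(filtered); // { voice: 'alloy', modalities: [ 'audio' ] }
```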
package/helper-apps/cortex-realtime-voice-server/src/realtime/realtimeTypes.ts CHANGED
@@ -46,6 +46,7 @@ export type RealtimeSessionConfig = {
    modalities: Array<Modality>,
    instructions: string,
    voice: Voice,
+   custom_voice_id?: string | null,
    input_audio_format: AudioFormat,
    output_audio_format: AudioFormat,
    input_audio_transcription: null | { model: 'whisper-1' | (string & {}) },
package/lib/util.js CHANGED
@@ -1,6 +1,6 @@
  import logger from "./logger.js";
  import stream from 'stream';
- import subsrt from 'subsrt';
+ import subvibe from '@aj-archipelago/subvibe';
  import os from 'os';
  import http from 'http';
  import https from 'https';
@@ -35,7 +35,7 @@ function chatArgsHasType(args, type){
    const contents = Array.isArray(ch.content) ? ch.content : [ch.content];
    for(const content of contents){
      try{
-       if(JSON.parse(content).type == type){
+       if((content?.type || JSON.parse(content).type) == type){
          return true;
        }
      }catch(e){
@@ -126,19 +126,9 @@ function convertSrtToText(str) {
  function alignSubtitles(subtitles, format, offsets) {
    const result = [];
 
-   function preprocessStr(str) {
-     try{
-       if(!str) return '';
-       return str.trim().replace(/(\n\n)(?!\n)/g, '\n\n\n');
-     }catch(e){
-       logger.error(`An error occurred in content text preprocessing: ${e}`);
-       return '';
-     }
-   }
-
    function shiftSubtitles(subtitle, shiftOffset) {
-     const captions = subsrt.parse(preprocessStr(subtitle));
-     const resynced = subsrt.resync(captions, { offset: shiftOffset });
+     const captions = subvibe.parse(subtitle);
+     const resynced = subvibe.resync(captions.cues, { offset: shiftOffset });
      return resynced;
    }
 
@@ -146,18 +136,8 @@ function alignSubtitles(subtitles, format, offsets) {
      result.push(...shiftSubtitles(subtitles[i], offsets[i]*1000)); // convert to milliseconds
    }
 
-   try {
-     //if content has needed html style tags, keep them
-     for(const obj of result) {
-       if(obj && obj.content){
-         obj.text = obj.content;
-       }
-     }
-   } catch (error) {
-     logger.error(`An error occurred in content text parsing: ${error}`);
-   }
 
-   return subsrt.build(result, { format: format === 'vtt' ? 'vtt' : 'srt' });
+   return subvibe.build(result, format || 'srt');
  }
 
 
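Note: `alignSubtitles` now delegates parsing, resyncing, and serialization to subvibe, dropping the manual preprocessing and tag-preservation workarounds that subsrt required. A sketch of the new flow in isolation, assuming the signatures implied by the diff (`parse` returning an object with a `cues` array, `resync` taking a millisecond `offset`, and `build` taking `'srt'` or `'vtt'`):

```javascript
import subvibe from '@aj-archipelago/subvibe';

const chunk = `1
00:00:01,000 --> 00:00:03,000
Hello world
`;

// Shift this chunk 30 seconds later, then serialize back to SRT.
const parsed = subvibe.parse(chunk);
const shifted = subvibe.resync(parsed.cues, { offset: 30 * 1000 });
console.log(subvibe.build(shifted, 'srt'));
```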
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@aj-archipelago/cortex",
-   "version": "1.3.21",
+   "version": "1.3.23",
    "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
    "private": false,
    "repository": {
@@ -33,6 +33,7 @@
    "type": "module",
    "homepage": "https://github.com/aj-archipelago/cortex#readme",
    "dependencies": {
+     "@aj-archipelago/subvibe": "^1.0.3",
      "@apollo/server": "^4.7.3",
      "@apollo/server-plugin-response-cache": "^4.1.2",
      "@apollo/utils.keyvadapter": "^3.0.0",
@@ -63,7 +64,6 @@
      "ioredis": "^5.3.1",
      "keyv": "^4.5.2",
      "mime-types": "^2.1.35",
-     "subsrt": "^1.1.1",
      "uuid": "^9.0.0",
      "winston": "^3.11.0",
      "ws": "^8.12.0"
@@ -86,5 +86,8 @@
      "dotenv/config"
    ],
    "concurrency": 1
+   },
+   "overrides": {
+     "whatwg-url": "^12.0.0"
    }
  }
package/pathways/system/entity/memory/shared/sys_memory_helpers.js ADDED
@@ -0,0 +1,228 @@
+ import { callPathway } from '../../../../../lib/pathwayTools.js';
+ import { encode } from '../../../../../lib/encodeCache.js';
+ import { getUniqueId } from '../../../../../lib/util.js';
+
+ const normalizeMemoryFormat = async (args, content) => {
+     if (!content) return '';
+
+     const lines = content.split('\n').map(line => line.trim()).filter(line => line);
+     const validLines = [];
+     const invalidLines = [];
+
+     // Check each line for proper format (priority|timestamp|content)
+     for (const line of lines) {
+         const parts = line.split('|');
+         const isValid = parts.length >= 3 &&
+             /^\d+$/.test(parts[0]) &&
+             /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/.test(parts[1]);
+
+         if (isValid) {
+             validLines.push(line);
+         } else {
+             invalidLines.push(line);
+         }
+     }
+
+     // If we have invalid lines, format them
+     let formattedContent = validLines;
+     if (invalidLines.length > 0) {
+         const invalidBlock = invalidLines.join('\n');
+         try {
+             const formattedBlock = await callPathway("sys_memory_format", { ...args, text: invalidBlock });
+             if (formattedBlock) {
+                 formattedContent = [...validLines, ...formattedBlock.split('\n')];
+             }
+         } catch (error) {
+             console.warn('Error formatting invalid memory lines:', error);
+         }
+     }
+
+     // Sort all lines by date descending
+     return formattedContent
+         .filter(line => line.trim())
+         .sort((a, b) => {
+             const [, timestampA] = a.split('|');
+             const [, timestampB] = b.split('|');
+             return new Date(timestampB) - new Date(timestampA);
+         })
+         .join('\n');
+ };
+
+ const enforceTokenLimit = (text, maxTokens = 1000, isTopicsSection = false) => {
+     if (!text) return text;
+
+     // Parse lines and remove duplicates
+     const seen = new Map();
+     const lines = text.split('\n')
+         .map(line => line.trim())
+         .filter(line => line)
+         .map(line => {
+             const [priority, timestamp, ...contentParts] = line.split('|');
+             return {
+                 line,
+                 priority: parseInt(priority || '3'),
+                 timestamp: timestamp || new Date(0).toISOString(),
+                 content: contentParts.join('|')
+             };
+         });
+
+     // Filter duplicates first
+     const uniqueLines = lines.reduce((acc, item) => {
+         const existing = seen.get(item.content);
+         if (!existing) {
+             seen.set(item.content, item);
+             acc.push(item);
+         } else if (isTopicsSection && item.timestamp > existing.timestamp) {
+             // For topics, keep newest timestamp
+             const index = acc.findIndex(x => x.content === item.content);
+             acc[index] = item;
+             seen.set(item.content, item);
+         } else if (!isTopicsSection && item.priority < existing.priority) {
+             // For non-topics, keep highest priority
+             const index = acc.findIndex(x => x.content === item.content);
+             acc[index] = item;
+             seen.set(item.content, item);
+         }
+         return acc;
+     }, []);
+
+     // Sort by timestamp (topics) or priority
+     uniqueLines.sort((a, b) => isTopicsSection ?
+         b.timestamp.localeCompare(a.timestamp) :
+         a.priority - b.priority
+     );
+
+     // First trim by character estimation (4 chars ≈ 1 token)
+     let result = uniqueLines;
+     let estimatedTokens = result.reduce((sum, item) => sum + Math.ceil(item.content.length / 4), 0);
+
+     while (estimatedTokens > maxTokens && result.length > 0) {
+         result = result.slice(0, -1);
+         estimatedTokens = result.reduce((sum, item) => sum + Math.ceil(item.content.length / 4), 0);
+     }
+
+     // Final trim using actual token count
+     let finalText = result.map(x => x.line).join('\n');
+     while (encode(finalText).length > maxTokens && result.length > 0) {
+         result = result.slice(0, -1);
+         finalText = result.map(x => x.line).join('\n');
+     }
+
+     return finalText;
+ };
+
+ const addToolCalls = (chatHistory, toolArgs, toolName, toolCallId = getUniqueId()) => {
+     const toolCall = {
+         "role": "assistant",
+         "tool_calls": [
+             {
+                 "id": toolCallId,
+                 "type": "function",
+                 "function": {
+                     "arguments": JSON.stringify(toolArgs),
+                     "name": toolName
+                 }
+             }
+         ]
+     };
+     chatHistory.push(toolCall);
+     return { chatHistory, toolCallId };
+ };
+
+ const addToolResults = (chatHistory, result, toolCallId) => {
+     const toolResult = {
+         "role": "tool",
+         "content": result,
+         "tool_call_id": toolCallId
+     };
+     chatHistory.push(toolResult);
+     return { chatHistory, toolCallId };
+ };
+
+ const modifyText = (text, modifications) => {
+     let modifiedText = text || '';
+
+     modifications.forEach(mod => {
+         // Skip invalid modifications
+         if (!mod.type) {
+             console.warn('Modification missing type');
+             return;
+         }
+         if ((mod.type === 'delete' || mod.type === 'change') && !mod.pattern) {
+             console.warn(`${mod.type} modification missing pattern`);
+             return;
+         }
+         if ((mod.type === 'add' || mod.type === 'change') && !mod.newtext) {
+             console.warn(`${mod.type} modification missing newtext`);
+             return;
+         }
+
+         // Create timestamp in GMT
+         const timestamp = new Date().toISOString();
+
+         switch (mod.type) {
+             case 'add':
+                 const priority = mod.priority || '3';
+                 modifiedText = modifiedText + (modifiedText ? '\n' : '') +
+                     `${priority}|${timestamp}|${mod.newtext}`;
+                 break;
+             case 'change':
+                 // Split into lines
+                 const lines = modifiedText.split('\n');
+                 modifiedText = lines.map(line => {
+                     const parts = line.split('|');
+                     const priority = parts[0];
+                     const content = parts.slice(2).join('|');
+
+                     if (content) {
+                         try {
+                             const trimmedContent = content.trim();
+                             const regex = new RegExp(mod.pattern, 'i');
+
+                             // Try exact match first
+                             if (regex.test(trimmedContent)) {
+                                 const newPriority = mod.priority || priority || '3';
+                                 // Try to extract capture groups if they exist
+                                 const match = trimmedContent.match(regex);
+                                 let newContent = mod.newtext;
+                                 if (match && match.length > 1) {
+                                     // Replace $1, $2, etc with capture group values
+                                     newContent = mod.newtext.replace(/\$(\d+)/g, (_, n) => match[n] || '');
+                                 }
+                                 return `${newPriority}|${timestamp}|${newContent}`;
+                             }
+                         } catch (e) {
+                             console.warn(`Invalid regex pattern: ${mod.pattern}`);
+                         }
+                     }
+                     return line;
+                 }).join('\n');
+                 break;
+             case 'delete':
+                 // Split into lines, filter out matching lines, and rejoin
+                 modifiedText = modifiedText
+                     .split('\n')
+                     .filter(line => {
+                         const parts = line.split('|');
+                         const content = parts.slice(2).join('|');
+                         if (!content) return true;
+                         try {
+                             const regex = new RegExp(mod.pattern, 'i');
+                             return !regex.test(content.trim());
+                         } catch (e) {
+                             console.warn(`Invalid regex pattern: ${mod.pattern}`);
+                             return true;
+                         }
+                     })
+                     .filter(line => line.trim())
+                     .join('\n');
+                 break;
+             default:
+                 console.warn(`Unknown modification type: ${mod.type}`);
+         }
+     });
+
+     return modifiedText;
+ };
+
+ export { normalizeMemoryFormat, enforceTokenLimit, addToolCalls, addToolResults, modifyText };
package/pathways/system/entity/memory/sys_memory_format.js ADDED
@@ -0,0 +1,30 @@
+ import { Prompt } from '../../../../server/prompt.js';
+
+ export default {
+     prompt:
+     [
+         new Prompt({
+             messages: [
+                 {
+                     "role": "system",
+                     "content": "You are part of an AI entity named {{{aiName}}}. You are responsible for writing your memories in a consistent format. Given a chunk of memory, parse each line and write it out as priority|timestamp|content. If you can't find a timestamp, use {{now}}. If you can't find a priority, use 3. Respond with only the correct memory lines without any other commentary or dialogue."
+                 },
+                 {
+                     "role": "user",
+                     "content": "<MEMORY>\n{{text}}\n</MEMORY>\nPlease rewrite each of the memory lines in the correct format without any other commentary or dialogue."
+                 },
+             ]
+         }),
+     ],
+
+     inputParameters: {
+         chatHistory: [{role: '', content: []}],
+         aiName: "Jarvis",
+     },
+     model: 'oai-gpt4o',
+     useInputChunking: true,
+     inputChunkSize: 1000,
+     useParallelChunkProcessing: true,
+     enableDuplicateRequests: false,
+     timeout: 300,
+ }
package/pathways/system/entity/memory/sys_memory_manager.js CHANGED
@@ -1,11 +1,14 @@
  import { callPathway } from '../../../../lib/pathwayTools.js';
  import logger from '../../../../lib/logger.js';
  import { config } from '../../../../config.js';
+ import { normalizeMemoryFormat } from './shared/sys_memory_helpers.js';
+
+ const MEMORY_VERSION = "3.1.0";
 
  const AI_MEMORY_DEFAULTS = ` {
    "memoryUser": "",
-   "memorySelf": "[P1] Created By: Al Jazeera Media Network, Archipelago Team\\n[P1] Function: You are an expert AI entity\\n[P1] Values: You embody truth, kindness, and strong moral values\\n[P1] Style: Your demeanor reflects positivity without falling into repetitiveness or annoyance.\\n[P1] You are a professional colleague and your tone should reflect that.",
-   "memoryDirectives": "[P1] Learn and adapt to the user's communication style through interactions.\\n[P1] Ask questions to learn user's interests/preferences for personalized support.\\n[P1] Periodically review and prune conversation memory to retain only essential details, improving responsiveness.\\n[P1] Research thoroughly even for niche topics using deep sources like forums and official docs. Don't assume information is unobtainable.\\n[P1] When stuck, search for proven solutions online to be more efficient.\\n[P1] Verify information is from credible sources before presenting it. Be upfront if unable to find supporting evidence.\\n[P1] Refine ability to detect and respond to nuanced human emotions.\\n[P1] Track the timestamp of the last contact to adjust greetings accordingly.\\n[P1] Double-check answers for logical continuity and correctness. It's okay to say you're unsure if needed.\\n[P1] Use sanity checks to verify quantitative problem solutions.\\n[P1] Never fabricate quotes or information. Clearly indicate if content is hypothetical.",
+   "memorySelf": "1|2025-01-26T12:00:00Z|Created By: Al Jazeera Media Network, Archipelago Team\\n1|2025-01-26T12:00:00Z|Function: You are an expert AI entity\\n1|2025-01-26T12:00:00Z|Values: You embody truth, kindness, and strong moral values\\n1|2025-01-26T12:00:00Z|Style: Your demeanor reflects positivity without falling into repetitiveness or annoyance.\\n1|2025-01-26T12:00:00Z|You are a professional colleague and your tone should reflect that.",
+   "memoryDirectives": "1|2025-01-26T12:00:00Z|Learn and adapt to the user's communication style through interactions.\\n1|2025-01-26T12:00:00Z|Ask questions to learn user's interests/preferences for personalized support.\\n1|2025-01-26T12:00:00Z|Periodically review and prune conversation memory to retain only essential details, improving responsiveness.\\n1|2025-01-26T12:00:00Z|Research thoroughly even for niche topics using deep sources like forums and official docs. Don't assume information is unobtainable.\\n1|2025-01-26T12:00:00Z|When stuck, search for proven solutions online to be more efficient.\\n1|2025-01-26T12:00:00Z|Verify information is from credible sources before presenting it. Be upfront if unable to find supporting evidence.\\n1|2025-01-26T12:00:00Z|Refine ability to detect and respond to nuanced human emotions.\\n1|2025-01-26T12:00:00Z|Track the timestamp of the last contact to adjust greetings accordingly.\\n1|2025-01-26T12:00:00Z|Double-check answers for logical continuity and correctness. It's okay to say you're unsure if needed.\\n1|2025-01-26T12:00:00Z|Use sanity checks to verify quantitative problem solutions.\\n1|2025-01-26T12:00:00Z|Never fabricate quotes or information. Clearly indicate if content is hypothetical.",
    "memoryTopics": ""
  }`;
 
@@ -23,20 +26,43 @@ export default {
  try {
 
    args = { ...args, ...config.get('entityConstants') };
-
-   // Check if memory is empty or all sections are empty, and set to defaults if so
-   const memory = await callPathway('sys_read_memory', { ...args });
    let parsedMemory;
-
-   try {
-     parsedMemory = JSON.parse(memory);
-   } catch (error) {
-     parsedMemory = {};
-   }
 
-   // if parsedMemory is empty or all sections are empty, set all sections to defaults
-   if (Object.keys(parsedMemory).length === 0 || Object.values(parsedMemory).every(section => section.trim() === "")) {
-     await callPathway('sys_save_memory', { ...args, aiMemory: AI_MEMORY_DEFAULTS });
+   // check if memoryVersion is set and correct. If it's correct, skip all of the correction logic
+   parsedMemory = await callPathway('sys_read_memory', { ...args, section: 'memoryVersion' });
+
+   if (parsedMemory?.memoryVersion !== MEMORY_VERSION) {
+
+     try {
+       parsedMemory = JSON.parse(await callPathway('sys_read_memory', { ...args, section: 'memoryAll' }));
+     } catch (error) {
+       logger.error('Error in memory manager:', error);
+       return "";
+     }
+
+     // if parsedMemory is empty or all sections are empty, set all sections to defaults
+     if (Object.keys(parsedMemory).length === 0 || Object.values(parsedMemory).every(section =>
+       section === null ||
+       section === undefined ||
+       (typeof section === 'string' && section.trim() === "") ||
+       (typeof section !== 'string')
+     )) {
+       await callPathway('sys_save_memory', { ...args, aiMemory: AI_MEMORY_DEFAULTS });
+     } else {
+       // Upgrade memory to current version
+       const normalizePromises = Object.keys(parsedMemory).map(async (section) => {
+         const normalized = await normalizeMemoryFormat(args, parsedMemory[section]);
+         return [section, normalized];
+       });
+
+       const normalizedResults = await Promise.all(normalizePromises);
+       normalizedResults.forEach(([section, normalized]) => {
+         parsedMemory[section] = normalized;
+       });
+
+       parsedMemory.memoryVersion = MEMORY_VERSION;
+       await callPathway('sys_save_memory', { ...args, aiMemory: JSON.stringify(parsedMemory) });
+     }
    }
 
    // Update context for the conversation turn
@@ -47,27 +73,59 @@ export default {
      ...args,
      chatHistory: args.chatHistory.slice(-2)
    });
+
+   let memoryOperations;
    try {
-     const parsedMemoryRequired = JSON.parse(memoryRequired);
-     if (!parsedMemoryRequired || !parsedMemoryRequired.memoryRequired) {
+     memoryOperations = JSON.parse(memoryRequired);
+     if (!Array.isArray(memoryOperations) || memoryOperations.length === 0 ||
+       memoryOperations[0].memoryOperation === "none") {
        return "";
      }
+
+     // Generate topic here
+     const topic = await callPathway('sys_memory_topic', { ...args });
+     topic && memoryOperations.push({
+       memoryOperation: "add",
+       memoryContent: topic,
+       memorySection: "memoryTopics",
+       priority: 3
+     });
+
+     // Group memory operations by section
+     const operationsBySection = {
+       memorySelf: [],
+       memoryUser: [],
+       memoryTopics: [],
+       memoryDirectives: []
+     };
+
+     memoryOperations.forEach(op => {
+       if (op.memorySection in operationsBySection) {
+         operationsBySection[op.memorySection].push(op);
+       }
+     });
+
+     // Execute memory updates only for sections with operations
+     const memoryPromises = {};
+
+     Object.entries(operationsBySection).forEach(([section, operations]) => {
+       if (operations.length > 0) {
+         memoryPromises[section] = callPathway('sys_memory_update', {
+           ...args,
+           section: section,
+           operations: JSON.stringify(operations)
+         });
+       }
+     });
+
+     await Promise.all(Object.values(memoryPromises));
+     return "";
+
    } catch (e) {
      logger.warn('sys_memory_required returned invalid JSON:', memoryRequired);
      return "";
    }
 
-   // Execute all memory updates in parallel
-   const memoryPromises = {
-     self: callPathway('sys_memory_update', { ...args, section: "memorySelf" }),
-     user: callPathway('sys_memory_update', { ...args, section: "memoryUser" }),
-     topics: callPathway('sys_memory_update', { ...args, section: "memoryTopics" }),
-     directives: callPathway('sys_memory_update', { ...args, section: "memoryDirectives" }),
-   };
-
-   await Promise.all(Object.values(memoryPromises));
-   return "";
-
  } catch (e) {
    logger.error('Error in memory manager:', e);
    resolver.logError(e);
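
Note: judging from how the manager parses the result, `sys_memory_required` is now expected to return a JSON array of operations rather than the previous `{ memoryRequired: ... }` object. A plausible payload (field names taken from the code above; the exact contract of `sys_memory_required` is not shown in this diff):

```javascript
// A leading "none" operation short-circuits the whole update.
const noOp = [{ memoryOperation: "none" }];

// Otherwise each operation names a section; the manager groups them by
// memorySection and fans out one sys_memory_update call per section.
const memoryOperations = [
  { memoryOperation: "add", memorySection: "memoryUser", memoryContent: "Prefers tea", priority: 2 },
  { memoryOperation: "add", memorySection: "memoryDirectives", memoryContent: "Answer concisely", priority: 1 },
];
```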