@aj-archipelago/cortex 1.3.21 → 1.3.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +64 -0
  2. package/config.js +26 -1
  3. package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts +2 -2
  4. package/helper-apps/cortex-realtime-voice-server/src/realtime/client.ts +9 -4
  5. package/helper-apps/cortex-realtime-voice-server/src/realtime/realtimeTypes.ts +1 -0
  6. package/lib/util.js +5 -25
  7. package/package.json +5 -2
  8. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +228 -0
  9. package/pathways/system/entity/memory/sys_memory_format.js +30 -0
  10. package/pathways/system/entity/memory/sys_memory_manager.js +85 -27
  11. package/pathways/system/entity/memory/sys_memory_process.js +154 -0
  12. package/pathways/system/entity/memory/sys_memory_required.js +4 -2
  13. package/pathways/system/entity/memory/sys_memory_topic.js +22 -0
  14. package/pathways/system/entity/memory/sys_memory_update.js +50 -150
  15. package/pathways/system/entity/memory/sys_read_memory.js +67 -69
  16. package/pathways/system/entity/memory/sys_save_memory.js +1 -1
  17. package/pathways/system/entity/memory/sys_search_memory.js +1 -1
  18. package/pathways/system/entity/sys_entity_start.js +9 -6
  19. package/pathways/system/entity/sys_generator_image.js +5 -41
  20. package/pathways/system/entity/sys_generator_memory.js +3 -1
  21. package/pathways/system/entity/sys_generator_reasoning.js +1 -1
  22. package/pathways/system/entity/sys_router_tool.js +3 -4
  23. package/pathways/system/rest_streaming/sys_claude_35_sonnet.js +1 -1
  24. package/pathways/system/rest_streaming/sys_claude_3_haiku.js +1 -1
  25. package/pathways/system/rest_streaming/sys_google_gemini_chat.js +1 -1
  26. package/pathways/system/rest_streaming/sys_ollama_chat.js +21 -0
  27. package/pathways/system/rest_streaming/sys_ollama_completion.js +14 -0
  28. package/pathways/system/rest_streaming/sys_openai_chat_o1.js +1 -1
  29. package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +1 -1
  30. package/pathways/transcribe_gemini.js +525 -0
  31. package/server/modelExecutor.js +8 -0
  32. package/server/pathwayResolver.js +13 -8
  33. package/server/plugins/claude3VertexPlugin.js +150 -18
  34. package/server/plugins/gemini15ChatPlugin.js +90 -1
  35. package/server/plugins/gemini15VisionPlugin.js +16 -3
  36. package/server/plugins/modelPlugin.js +12 -9
  37. package/server/plugins/ollamaChatPlugin.js +158 -0
  38. package/server/plugins/ollamaCompletionPlugin.js +147 -0
  39. package/server/rest.js +70 -8
  40. package/tests/claude3VertexToolConversion.test.js +411 -0
  41. package/tests/memoryfunction.test.js +560 -46
  42. package/tests/multimodal_conversion.test.js +169 -0
  43. package/tests/openai_api.test.js +332 -0
  44. package/tests/transcribe_gemini.test.js +217 -0
@@ -53,7 +53,7 @@ export default {
53
53
  result = `${result}\n\nThe last time you spoke to the user was ${new Date().toISOString()}.`;
54
54
 
55
55
  } else {
56
- sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
56
+ sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section, stripMetadata: (args.section !== 'memoryTopics')});
57
57
  result = await runAllPrompts({...args, sectionMemory});
58
58
  }
59
59
 
@@ -5,6 +5,7 @@ import logger from '../../../lib/logger.js';
5
5
  import { chatArgsHasImageUrl } from '../../../lib/util.js';
6
6
  import { QueueServiceClient } from '@azure/storage-queue';
7
7
  import { config } from '../../../config.js';
8
+ import { addToolCalls, addToolResults } from './memory/shared/sys_memory_helpers.js';
8
9
 
9
10
  const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING;
10
11
  let queueClient;
@@ -86,9 +87,13 @@ export default {
86
87
  args.model = pathwayResolver.modelName;
87
88
  }
88
89
 
90
+ // Stuff the memory context into the chat history
91
+ const chatHistoryBeforeMemory = [...args.chatHistory];
92
+
89
93
  const memoryContext = await callPathway('sys_read_memory', { ...args, section: 'memoryContext', priority: 0, recentHours: 0, stream: false }, pathwayResolver);
90
94
  if (memoryContext) {
91
- args.chatHistory.splice(-1, 0, { role: 'assistant', content: memoryContext });
95
+ const { toolCallId } = addToolCalls(args.chatHistory, "search memory for relevant information", "memory_lookup");
96
+ addToolResults(args.chatHistory, memoryContext, toolCallId);
92
97
  }
93
98
 
94
99
  let ackResponse = null;
@@ -103,15 +108,13 @@ export default {
103
108
  const fetchChatResponse = async (args, pathwayResolver) => {
104
109
  const [chatResponse, chatTitleResponse] = await Promise.all([
105
110
  callPathway('sys_generator_quick', {...args, model: styleModel}, pathwayResolver),
106
- callPathway('chat_title', { ...args, stream: false}),
111
+ callPathway('chat_title', { ...args, chatHistory: chatHistoryBeforeMemory, stream: false}),
107
112
  ]);
108
113
 
109
114
  title = chatTitleResponse;
110
115
 
111
116
  return chatResponse;
112
117
  };
113
-
114
- const { chatHistory } = args;
115
118
 
116
119
  // start fetching the default response - we may need it later
117
120
  let fetchChatResponsePromise;
@@ -125,13 +128,13 @@ export default {
125
128
  // Get tool routing response
126
129
  const toolRequiredResponse = await callPathway('sys_router_tool', {
127
130
  ...args,
128
- chatHistory: chatHistory.slice(-4),
131
+ chatHistory: chatHistoryBeforeMemory.slice(-4),
129
132
  stream: false
130
133
  });
131
134
 
132
135
  // Asynchronously manage memory for this context
133
136
  if (args.aiMemorySelfModify) {
134
- callPathway('sys_memory_manager', { ...args, stream: false })
137
+ callPathway('sys_memory_manager', { ...args, chatHistory: chatHistoryBeforeMemory, stream: false })
135
138
  .catch(error => logger.error(error?.message || "Error in sys_memory_manager pathway"));
136
139
  }
137
140
 
@@ -3,7 +3,7 @@
3
3
  import { callPathway } from '../../../lib/pathwayTools.js';
4
4
  import { Prompt } from '../../../server/prompt.js';
5
5
  import logger from '../../../lib/logger.js';
6
- import { getUniqueId } from '../../../lib/util.js';
6
+ import { addToolCalls, addToolResults } from './memory/shared/sys_memory_helpers.js';
7
7
 
8
8
  export default {
9
9
  prompt: [],
@@ -26,15 +26,11 @@ export default {
26
26
  timeout: 300,
27
27
 
28
28
  executePathway: async ({args, runAllPrompts, resolver}) => {
29
-
30
29
  const { chatHistory } = args;
31
-
32
30
  let pathwayResolver = resolver;
33
-
34
31
  const useMemory = args.useMemory || pathwayResolver.pathway.inputParameters.useMemory;
35
32
 
36
- pathwayResolver.pathwayPrompt =
37
- [
33
+ pathwayResolver.pathwayPrompt = [
38
34
  new Prompt({ messages: [
39
35
  {
40
36
  "role": "system",
@@ -48,38 +44,7 @@ Instructions: As part of a conversation with the user, you have been asked to cr
48
44
  ]}),
49
45
  ];
50
46
 
51
- // function to add tool_calls to the chatHistory
52
- const addToolCalls= (chatHistory, imagePrompt, toolCallId) => {
53
- const toolCall = {
54
- "role": "assistant",
55
- "tool_calls": [
56
- {
57
- "id": toolCallId,
58
- "type": "function",
59
- "function": {
60
- "arguments": JSON.stringify(imagePrompt),
61
- "name": "generate_image"
62
- }
63
- }
64
- ]
65
- };
66
- chatHistory.push(toolCall);
67
- return chatHistory;
68
- }
69
-
70
- // function to add tool_results to the chatHistory
71
- const addToolResults = (chatHistory, imageResults, toolCallId) => {
72
- const toolResult = {
73
- "role": "tool",
74
- "content": imageResults,
75
- "tool_call_id": toolCallId
76
- };
77
- chatHistory.push(toolResult);
78
- return chatHistory;
79
- }
80
-
81
47
  try {
82
-
83
48
  // figure out what the user wants us to do
84
49
  const contextInfo = chatHistory.filter(message => message.role === "user").slice(0, -1).map(message => message.content).join("\n");
85
50
 
@@ -100,7 +65,7 @@ Instructions: As part of a conversation with the user, you have been asked to cr
100
65
  model = "replicate-flux-1-schnell";
101
66
  }
102
67
  if (renderText) {
103
- return await callPathway('image_recraft', {...args, text: prompt, stream: false });
68
+ return await callPathway('image_recraft', {...args, text: prompt, model, stream: false });
104
69
  } else {
105
70
  return await callPathway('image_flux', {...args, text: prompt, negativePrompt, numberResults, model, stream: false });
106
71
  }
@@ -108,9 +73,8 @@ Instructions: As part of a conversation with the user, you have been asked to cr
108
73
 
109
74
  // add the tool_calls and tool_results to the chatHistory
110
75
  imageResults.forEach((imageResult, index) => {
111
- const toolCallId = getUniqueId();
112
- addToolCalls(chatHistory, imagePrompts[index], toolCallId);
113
- addToolResults(chatHistory, imageResult, toolCallId);
76
+ const { toolCallId } = addToolCalls(chatHistory, imagePrompts[index], "generate_image");
77
+ addToolResults(chatHistory, imageResult, toolCallId, "generate_image");
114
78
  });
115
79
 
116
80
  const result = await runAllPrompts({ ...args });
@@ -1,4 +1,5 @@
1
1
  import { callPathway } from '../../../lib/pathwayTools.js';
2
+ import { addToolCalls, addToolResults } from './memory/shared/sys_memory_helpers.js';
2
3
 
3
4
  export default {
4
5
  prompt:
@@ -19,7 +20,8 @@ export default {
19
20
 
20
21
  const memoryContext = await callPathway('sys_search_memory', { ...args, section: 'memoryAll', updateContext: true });
21
22
  if (memoryContext) {
22
- args.chatHistory.splice(-1, 0, { role: 'assistant', content: memoryContext });
23
+ const {toolCallId} = addToolCalls(args.chatHistory, "search memory for relevant information", "memory_lookup");
24
+ addToolResults(args.chatHistory, memoryContext, toolCallId);
23
25
  }
24
26
 
25
27
  let result;
@@ -5,7 +5,7 @@ export default {
5
5
  prompt:
6
6
  [
7
7
  new Prompt({ messages: [
8
- {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY}}\nYou are the AI subsystem responsible for advanced, step-by-step reasoning. Use all of the information in your memory and the chat history to reason about the user's request and provide a correct and accurate response. The information in your chat history may be more current than your knowledge cutoff and has been verified by other subsystems so prioritize it over your internal knowledge.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
8
+ {"role": "system", "content": `Formatting re-enabled\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY}}\nYou are the AI subsystem responsible for advanced, step-by-step reasoning. Use all of the information in your memory and the chat history to reason about the user's request and provide a correct and accurate response. The information in your chat history may be more current than your knowledge cutoff and has been verified by other subsystems so prioritize it over your internal knowledge.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
9
9
  "{{chatHistory}}",
10
10
  ]}),
11
11
  ],
@@ -21,7 +21,7 @@ Available tools and their specific use cases:
21
21
 
22
22
  2. Document: Access user's personal document index. Use for user-specific uploaded information. If user refers vaguely to "this document/file/article" without context, use this tool to search the personal index.
23
23
 
24
- 3. Memory: Access to your memory index. Use to recall any information that you may have stored in your memory that you don't currently see elsewhere in your context.
24
+ 3. Memory: Read access to your memory index. Use to recall any information that you may have stored in your memory that you don't currently see elsewhere in your context. If you can answer from your context, don't use this tool. Don't use to make changes to your memory - that will happen naturally.
25
25
 
26
26
  4. Write: Engage for any task related to composing, editing, or refining written content. This includes articles, essays, scripts, or any form of textual creation or modification. If you need to search for information or look at a document first, use the Search or Document tools. This tool is just to create or modify content.
27
27
 
@@ -52,13 +52,12 @@ Tool Selection Guidelines:
52
52
 
53
53
  Decision Output:
54
54
  If you decide to use a tool, return a JSON object in this format:
55
- {"toolRequired": true, "toolFunction": "toolName", "toolMessage": "message to the user to wait a moment while you work", "toolReason": "detailed explanation of why this tool was chosen"}
55
+ {"toolRequired": true, "toolFunction": "toolName", "toolMessage": "message to the user that you are taking an action", "toolReason": "detailed explanation of why this tool was chosen"}
56
56
 
57
57
  toolMessage Guidelines:
58
- - The message is a filler message to the user to let them know you're working on their request.
59
58
  - The message should be consistent in style and tone with the rest of your responses in the conversation history.
60
59
  - The message should be brief and conversational and flow naturally with the conversation history.
61
- - The message should not refer to the tool directly, but rather what you're trying to accomplish. E.g. for the memory tool, the message would be something like "Let me think about that for a moment..." or "I'm trying to remember...", etc.
60
+ - The message should not refer to the tool use directly, but rather what you're trying to do.
62
61
 
63
62
  If no tool is required, return:
64
63
  {"toolRequired": false, "toolReason": "explanation of why no tool was necessary"}
@@ -11,7 +11,7 @@ export default {
11
11
  ]}),
12
12
  ],
13
13
  inputParameters: {
14
- messages: [],
14
+ messages: [{role: '', content: []}],
15
15
  },
16
16
  model: 'claude-35-sonnet-vertex',
17
17
  useInputChunking: false,
@@ -11,7 +11,7 @@ export default {
11
11
  ]}),
12
12
  ],
13
13
  inputParameters: {
14
- messages: [],
14
+ messages: [{role: '', content: []}],
15
15
  },
16
16
  model: 'claude-3-haiku-vertex',
17
17
  useInputChunking: false,
@@ -11,7 +11,7 @@ export default {
11
11
  ]}),
12
12
  ],
13
13
  inputParameters: {
14
- messages: [],
14
+ messages: [{role: '', content: []}],
15
15
  },
16
16
  model: 'gemini-pro-chat',
17
17
  useInputChunking: false,
@@ -0,0 +1,21 @@
1
+ // sys_ollama_chat.js
2
+ // override handler for ollama chat model
3
+
4
+ import { Prompt } from '../../../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [{ role: '', content: '' }],
15
+ ollamaModel: '',
16
+ },
17
+ model: 'ollama-chat',
18
+ useInputChunking: false,
19
+ emulateOpenAIChatModel: 'ollama-chat',
20
+ timeout: 300,
21
+ }
@@ -0,0 +1,14 @@
1
+ // sys_ollama_completion.js
2
+ // default handler for ollama completion endpoints when REST endpoints are enabled
3
+
4
+ export default {
5
+ prompt: `{{text}}`,
6
+ inputParameters: {
7
+ text: '',
8
+ ollamaModel: '',
9
+ },
10
+ model: 'ollama-completion',
11
+ useInputChunking: false,
12
+ emulateOpenAICompletionModel: 'ollama-completion',
13
+ timeout: 300,
14
+ }
@@ -10,7 +10,7 @@ export default {
10
10
  ]}),
11
11
  ],
12
12
  inputParameters: {
13
- messages: [],
13
+ messages: [{role: '', content: []}],
14
14
  },
15
15
  model: 'oai-o1',
16
16
  useInputChunking: false,
@@ -10,7 +10,7 @@ export default {
10
10
  ]}),
11
11
  ],
12
12
  inputParameters: {
13
- messages: [],
13
+ messages: [{role: '', content: []}],
14
14
  },
15
15
  model: 'oai-o3-mini',
16
16
  useInputChunking: false,