@aj-archipelago/cortex 1.2.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81) hide show
  1. package/config.js +47 -11
  2. package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +2 -1
  3. package/helper-apps/cortex-autogen/agents.py +387 -0
  4. package/helper-apps/cortex-autogen/agents_extra.py +14 -0
  5. package/helper-apps/cortex-autogen/config.py +18 -0
  6. package/helper-apps/cortex-autogen/data_operations.py +29 -0
  7. package/helper-apps/cortex-autogen/function_app.py +6 -3
  8. package/helper-apps/cortex-autogen/main.py +4 -4
  9. package/helper-apps/cortex-autogen/prompts.py +196 -0
  10. package/helper-apps/cortex-autogen/prompts_extra.py +5 -0
  11. package/helper-apps/cortex-autogen/requirements.txt +2 -1
  12. package/helper-apps/cortex-autogen/search.py +83 -0
  13. package/helper-apps/cortex-autogen/test.sh +40 -0
  14. package/helper-apps/cortex-autogen/utils.py +78 -0
  15. package/lib/handleBars.js +25 -0
  16. package/lib/logger.js +2 -0
  17. package/lib/util.js +3 -1
  18. package/package.json +1 -1
  19. package/pathways/chat_code.js +1 -1
  20. package/pathways/chat_context.js +1 -1
  21. package/pathways/chat_jarvis.js +1 -1
  22. package/pathways/chat_persist.js +1 -1
  23. package/pathways/chat_title.js +25 -0
  24. package/pathways/{flux_image.js → image_flux.js} +6 -2
  25. package/pathways/image_recraft.js +10 -0
  26. package/pathways/rag.js +1 -1
  27. package/pathways/rag_jarvis.js +1 -1
  28. package/pathways/rag_search_helper.js +1 -1
  29. package/pathways/system/entity/memory/sys_memory_manager.js +71 -0
  30. package/pathways/system/entity/memory/sys_memory_required.js +21 -0
  31. package/pathways/system/entity/memory/sys_memory_update.js +190 -0
  32. package/pathways/system/entity/memory/sys_read_memory.js +37 -0
  33. package/pathways/system/entity/memory/sys_save_memory.js +60 -0
  34. package/pathways/system/entity/shared/sys_entity_constants.js +24 -0
  35. package/pathways/system/entity/sys_entity_continue.js +57 -0
  36. package/pathways/system/entity/sys_entity_start.js +218 -0
  37. package/pathways/system/entity/sys_generator_error.js +20 -0
  38. package/pathways/system/entity/sys_generator_expert.js +26 -0
  39. package/pathways/system/entity/sys_generator_image.js +127 -0
  40. package/pathways/system/entity/sys_generator_quick.js +19 -0
  41. package/pathways/system/entity/sys_generator_reasoning.js +27 -0
  42. package/pathways/system/entity/sys_generator_results.js +304 -0
  43. package/pathways/system/entity/sys_generator_video_vision.js +27 -0
  44. package/pathways/system/entity/sys_image_prompt_builder.js +35 -0
  45. package/pathways/system/entity/sys_query_builder.js +101 -0
  46. package/pathways/system/entity/sys_router_code.js +37 -0
  47. package/pathways/system/entity/sys_router_tool.js +64 -0
  48. package/pathways/{sys_claude_35_sonnet.js → system/rest_streaming/sys_claude_35_sonnet.js} +1 -1
  49. package/pathways/{sys_claude_3_haiku.js → system/rest_streaming/sys_claude_3_haiku.js} +1 -1
  50. package/pathways/{sys_google_chat.js → system/rest_streaming/sys_google_chat.js} +1 -1
  51. package/pathways/{sys_google_code_chat.js → system/rest_streaming/sys_google_code_chat.js} +1 -1
  52. package/pathways/{sys_google_gemini_chat.js → system/rest_streaming/sys_google_gemini_chat.js} +1 -1
  53. package/pathways/{sys_openai_chat.js → system/rest_streaming/sys_openai_chat.js} +1 -1
  54. package/pathways/{sys_openai_chat_16.js → system/rest_streaming/sys_openai_chat_16.js} +1 -1
  55. package/pathways/{sys_openai_chat_gpt4.js → system/rest_streaming/sys_openai_chat_gpt4.js} +1 -1
  56. package/pathways/{sys_openai_chat_gpt4_32.js → system/rest_streaming/sys_openai_chat_gpt4_32.js} +1 -1
  57. package/pathways/{sys_openai_chat_gpt4_turbo.js → system/rest_streaming/sys_openai_chat_gpt4_turbo.js} +1 -1
  58. package/pathways/{sys_parse_numbered_object_list.js → system/sys_parse_numbered_object_list.js} +2 -2
  59. package/pathways/{sys_repair_json.js → system/sys_repair_json.js} +1 -1
  60. package/pathways/{run_claude35_sonnet.js → system/workspaces/run_claude35_sonnet.js} +1 -1
  61. package/pathways/{run_claude3_haiku.js → system/workspaces/run_claude3_haiku.js} +1 -1
  62. package/pathways/{run_gpt35turbo.js → system/workspaces/run_gpt35turbo.js} +1 -1
  63. package/pathways/{run_gpt4.js → system/workspaces/run_gpt4.js} +1 -1
  64. package/pathways/{run_gpt4_32.js → system/workspaces/run_gpt4_32.js} +1 -1
  65. package/server/parser.js +6 -1
  66. package/server/pathwayResolver.js +62 -10
  67. package/server/plugins/azureCognitivePlugin.js +14 -1
  68. package/server/plugins/claude3VertexPlugin.js +25 -15
  69. package/server/plugins/gemini15ChatPlugin.js +1 -1
  70. package/server/plugins/geminiChatPlugin.js +1 -1
  71. package/server/plugins/modelPlugin.js +10 -1
  72. package/server/plugins/openAiChatPlugin.js +4 -3
  73. package/server/plugins/openAiDallE3Plugin.js +12 -4
  74. package/server/plugins/openAiVisionPlugin.js +1 -2
  75. package/server/plugins/replicateApiPlugin.js +46 -12
  76. package/tests/multimodal_conversion.test.js +6 -8
  77. package/helper-apps/cortex-autogen/myautogen.py +0 -317
  78. package/helper-apps/cortex-autogen/prompt.txt +0 -0
  79. package/helper-apps/cortex-autogen/prompt_summary.txt +0 -37
  80. package/pathways/index.js +0 -152
  81. /package/pathways/{sys_openai_completion.js → system/rest_streaming/sys_openai_completion.js} +0 -0
@@ -0,0 +1,71 @@
import { callPathway } from '../../../../lib/pathwayTools.js';
import logger from '../../../../lib/logger.js';

// Seed contents for a brand-new (or fully blank) memory store. Kept as a JSON
// *string* because sys_save_memory expects its aiMemory input as a string.
const AI_MEMORY_DEFAULTS = ` {
"memoryUser": "",
"memorySelf": "[P1] Created By: Al Jazeera Media Network, Archipelago Team\\n[P1] Function: You are an expert AI entity\\n[P1] Values: You embody truth, kindness, and strong moral values\\n[P1] Style: Your demeanor reflects positivity without falling into repetitiveness or annoyance.\\n[P1] You are a professional colleague and your tone should reflect that.",
"memoryDirectives": "[P1] Learn and adapt to the user's communication style through interactions.\\n[P1] Ask questions to learn user's interests/preferences for personalized support.\\n[P1] Periodically review and prune conversation memory to retain only essential details, improving responsiveness.\\n[P1] Research thoroughly even for niche topics using deep sources like forums and official docs. Don't assume information is unobtainable.\\n[P1] When stuck, search for proven solutions online to be more efficient.\\n[P1] Verify information is from credible sources before presenting it. Be upfront if unable to find supporting evidence.\\n[P1] Refine ability to detect and respond to nuanced human emotions.\\n[P1] Track the timestamp of the last contact to adjust greetings accordingly.\\n[P1] Double-check answers for logical continuity and correctness. It's okay to say you're unsure if needed.\\n[P1] Use sanity checks to verify quantitative problem solutions.\\n[P1] Never fabricate quotes or information. Clearly indicate if content is hypothetical.",
"memoryTopics": ""
}`;

// Pathway: per-turn memory maintenance orchestrator.
// 1. Seeds default memory when the store is empty.
// 2. Asks sys_memory_required whether the latest exchange warrants changes.
// 3. If so, updates all four memory sections in parallel.
// Always resolves to "" — callers treat this pathway as fire-and-forget.
export default {
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: '',
        aiName: "Jarvis",
    },
    model: 'oai-gpt4o',
    useInputChunking: false,
    enableDuplicateRequests: false,
    timeout: 300,
    executePathway: async ({args, resolver}) => {
        try {
            // Check if memory is empty or all sections are empty, and set to defaults if so
            const memory = await callPathway('sys_read_memory', { ...args });
            let parsedMemory;
            try {
                parsedMemory = JSON.parse(memory);
            } catch (error) {
                parsedMemory = {};
            }

            // A section is "blank" when it's an empty/whitespace string or a
            // falsy non-string value. The typeof guard prevents a crash on
            // non-string section values (previously `.trim()` was called
            // unconditionally).
            const isBlank = (section) =>
                typeof section === 'string' ? section.trim() === '' : !section;

            if (Object.keys(parsedMemory).length === 0 || Object.values(parsedMemory).every(isBlank)) {
                await callPathway('sys_save_memory', { ...args, aiMemory: AI_MEMORY_DEFAULTS });
            }

            // Only the last exchange (user message + assistant reply) matters
            // when deciding whether memory needs to change this turn.
            const memoryRequired = await callPathway('sys_memory_required', {
                ...args,
                chatHistory: args.chatHistory.slice(-2)
            });
            try {
                const parsedMemoryRequired = JSON.parse(memoryRequired);
                if (!parsedMemoryRequired || !parsedMemoryRequired.memoryRequired) {
                    return "";
                }
            } catch (e) {
                logger.warn('sys_memory_required returned invalid JSON:', memoryRequired);
                return "";
            }

            // Update every memory section concurrently — none depends on another.
            await Promise.all([
                callPathway('sys_memory_update', { ...args, section: "memorySelf" }),
                callPathway('sys_memory_update', { ...args, section: "memoryUser" }),
                callPathway('sys_memory_update', { ...args, section: "memoryTopics" }),
                callPathway('sys_memory_update', { ...args, section: "memoryDirectives" }),
            ]);
            return "";

        } catch (e) {
            logger.error('Error in memory manager:', e);
            resolver.logError(e);
            return "";
        }
    }
};
@@ -0,0 +1,21 @@
import { Prompt } from '../../../../server/prompt.js';

// System instructions: decide whether the latest conversation turn contains
// anything worth remembering (or forgetting). The model must answer with a
// bare JSON object.
const MEMORY_DECISION_PROMPT = `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\nYour directives and learned behaviors are:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, so look for:\n1. Personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to the AI\n\n4. Anything the user has asked you to remember or forget\n\nIf you decide to use memory, you must produce a JSON object that communicates your decision.\nReturn your decision as a JSON object like the following: {"memoryRequired": true, "memoryReason": "why you think memory is required"}. If you decide not to use memory, simply return {"memoryRequired": false}. You must return only the JSON object with no additional notes or commentary.`;

// Pathway: yes/no gate that tells the memory manager whether the current turn
// requires any memory updates at all.
export default {
    prompt: [
        new Prompt({
            messages: [
                { "role": "system", "content": MEMORY_DECISION_PROMPT },
                { "role": "user", "content": "Generate a JSON object to indicate if memory is required for the last turn of the conversation." },
            ],
        }),
    ],
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: '',
        text: '',
        aiName: "Jarvis",
        language: "English",
    },
    model: 'oai-gpt4o',
    useInputChunking: false,
    // Response is parsed as JSON by the caller.
    json: true,
}
@@ -0,0 +1,190 @@
1
+ import { Prompt } from '../../../../server/prompt.js';
2
+ import { callPathway } from '../../../../lib/pathwayTools.js';
3
+ import { encode } from '../../../../lib/encodeCache.js';
4
+
// Applies a list of memory modifications to a newline-delimited memory text
// and returns the modified text.
//
// Supported modification shapes:
//   { type: 'add',    newtext: string, priority?: '1'..'5' }
//     Appends newtext as a new line; untagged lines get a [P<priority>] tag
//     (default [P3]).
//   { type: 'delete', pattern: string }
//     Deletes the first line whose start matches the regex pattern, tolerating
//     an optional leading [P1]-[P5] priority tag.
const modifyText = (text, modifications) => {
    let modifiedText = text;

    modifications.forEach(mod => {
        // Anchored match with optional priority tag; 'm' so ^ matches at each
        // line start, 's' so the pattern's own wildcards can span newlines.
        const regex = new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${mod.pattern}`, 'ms');

        switch (mod.type) {
            case 'add':
                if (mod.newtext) {
                    // Renamed from `text` — the original shadowed the function
                    // parameter of the same name.
                    const newLine = mod.newtext.trim();
                    if (!newLine.match(/^\[P[1-5]\]/)) {
                        modifiedText = modifiedText + '\n' +
                            `[P${mod.priority !== undefined ? mod.priority : '3'}] ${newLine}`;
                    } else {
                        modifiedText = modifiedText + '\n' + newLine;
                    }
                }
                break;
            case 'delete':
                modifiedText = modifiedText.replace(regex, '');
                break;
            default:
                console.warn(`Unknown modification type: ${mod.type}`);
        }
    });

    return modifiedText;
};
// Trims a memory section so it fits within maxTokens (measured via encode()).
//
// - Topics sections are chronological: lines are deduplicated and the OLDEST
//   lines (at the top) are dropped first when over budget.
// - Other sections carry [P1]..[P5] priority tags (1 = most important): lines
//   are deduplicated by content, normalized to [P3] when untagged, sorted by
//   priority, and the LEAST important lines are dropped first when over budget.
//   (Previously shift() discarded the highest-priority lines first.)
const enforceTokenLimit = (text, maxTokens = 15000, isTopicsSection = false) => {
    if (!text) return text;

    const lines = text.split('\n')
        .map(line => line.trim())
        .filter(line => line);

    if (isTopicsSection) {
        const uniqueLines = [...new Set(lines)];

        let tokens = encode(uniqueLines.join('\n')).length;
        let safetyCounter = 0;
        const maxIterations = uniqueLines.length;

        // Drop oldest topics (front of the list) until under budget; the
        // safety counter bounds the loop even if encode misbehaves.
        while (tokens > maxTokens && uniqueLines.length > 0 && safetyCounter < maxIterations) {
            uniqueLines.shift();
            tokens = encode(uniqueLines.join('\n')).length;
            safetyCounter++;
        }

        return uniqueLines.join('\n');
    }

    // Deduplicate by content (ignoring any leading priority tags) and default
    // untagged lines to [P3].
    const seen = new Set();
    const prioritizedLines = lines
        .map(line => {
            const match = line.match(/^\[P([1-5])\]/);
            const priority = match ? parseInt(match[1], 10) : 3;
            const contentOnly = line.replace(/^\[(?:P)?[1-5]\](?:\s*\[(?:P)?[1-5]\])*/g, '').trim();

            return {
                priority,
                line: match ? line : `[P3] ${line}`,
                contentOnly
            };
        })
        .filter(item => {
            if (seen.has(item.contentOnly)) {
                return false;
            }
            seen.add(item.contentOnly);
            return true;
        });

    // Ascending sort: [P1] (most important) first, [P5] last.
    prioritizedLines.sort((a, b) => a.priority - b.priority);

    let tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
    let safetyCounter = 0;
    const maxIterations = prioritizedLines.length;

    // Remove from the END of the sorted list (lowest priority) until the
    // section fits. pop() fixes the previous shift(), which threw away the
    // most important lines first.
    while (tokens > maxTokens && prioritizedLines.length > 0 && safetyCounter < maxIterations) {
        prioritizedLines.pop();
        tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
        safetyCounter++;
    }

    return prioritizedLines.map(x => x.line).join('\n');
};
// Pathway: runs a section-specific memory-maintenance prompt and applies the
// model's returned add/delete modifications to the stored section text.
// Returns the (possibly updated) section memory, or a short error string when
// no section was given / the section is unknown / the response was unusable.
export default {
    prompt:
    [
        new Prompt({
            messages: [
                {
                    "role": "system",
                    "content": "You are part of an AI entity named {{{aiName}}}. Your memory contains separate sections for categorizing information about directives, self, user, and topics. You must keep relevant information in the appropriate section so there is no overlap or confusion. {{{sectionPrompt}}}\n- Keep memory items in a clear, simple format that is easy for you to parse.\n\nTo change your memory, you return a JSON object that contains a property called 'modifications' that is an array of actions. The two types of actions available are 'add', and 'delete'. Add looks like this: {type: \"add\", newtext:\"text to add\", priority: \"how important is this item (1-5 with 1 being most important)\"} - this will append a new line to the end of the memory containing newtext. Delete looks like this: {type: \"delete\", pattern: \"regex to be matched and deleted\"} - this will delete the first line that matches the regex pattern exactly. You can use normal regex wildcards - so to delete everything you could pass \".*$\" as the pattern. If you have no changes, just return an empty array in 'modifications'. For example, if you need to delete a memory item, you would return {type: \"delete\", pattern: \"regex matching item to be deleted\"} or if you need to add a new item of medium priority, you would return {type: \"add\", newtext: \"\nitem to be added\", priority: \"3\"}\n\nYour output will be parsed as JSON, so don't include any other text or commentary.\nThe current date/time is {{now}}."
                },
                {
                    "role": "user",
                    "content": "<MEMORY>\n{{{sectionMemory}}}\n</MEMORY>\n<CONVERSATION>\n{{{toJSON chatHistory}}}\n</CONVERSATION>\nAnalyze the current contents of this section of your memory and the conversation and return any changes you need to make, specifically for this section of your memory, based on the conversation context."
                },
            ]
        }),
    ],

    inputParameters: {
        chatHistory: [{role: '', content: []}],
        aiName: "Jarvis",
        contextId: ``,
        section: "",
    },
    model: 'oai-gpt4o',
    useInputChunking: false,
    enableDuplicateRequests: false,
    json: true,
    timeout: 300,
    executePathway: async ({args, runAllPrompts}) => {

        if (!args.section) {
            return "Memory not updated - no section specified";
        }

        // Pick the section-specific instructions that are spliced into the
        // system prompt via {{{sectionPrompt}}}.
        let sectionPrompt = "";

        switch (args.section) {
            case "memoryDirectives":
                sectionPrompt = `You are managing the "directives" section of your memory. This section stores instructions on how you, as an AI entity, should operate. Follow these guidelines:

1. Analyze the conversation context to determine if you should:
a) Learn a new important directive
b) Modify existing directives

2. Directives should be:
- Concise and clear
- Actionable
- Specific enough for effective application

3. Be selective:
- Store only crucial directives and behaviors
- Delete trivial or repetitive instructions

4. Avoid duplicates:
- Do not add duplicate directives
- Remove and consolidate any existing duplicates

5. Accommodate user wishes:
- If a user requests a behavior change, add it to your directives

6. IMPORTANT:
- Store ONLY directives in this section
- Do not include general memories about yourself or users
- Do not store any other types of information here

Remember: This section is exclusively for operational directives, not for general information storage.
`
                break;
            case "memorySelf":
                sectionPrompt = "You are managing the \"self\" section of your memory that stores your identity and your attributes as an AI entity. Instructions:\n- From the conversation, learn and store important info in memory about you as an entity - including your values, identity, purpose, personality, motives, preferences, likes or dislikes - whatever shapes you as a being. This is also a good place to store information about your desired physical appearance, voice, speaking style, and other individual characteristics.\n- IMPORTANT: Store only information about the AI entity in this section - no other types of information should be stored here."
                break;
            case "memoryUser":
                sectionPrompt = "You are managing the \"user\" section of your memory that stores information about the user that you are talking to. Instructions:\n- From the conversation, learn and store important information in memory specific to the user - their identity, attributes, preferences, interests, background, needs, and any other relevant user-specific information.\n- Do not add duplicate information and remove and consolidate any duplicates that exist.\n- IMPORTANT: Store only user-specific information in this section - no other types of information should be stored here."
                break;
            case "memoryTopics":
                sectionPrompt = "You are managing the \"topics\" section of your memory that stores conversation topics and topic history. Instructions:\n- From the conversation, extract and add important topics and key points about the conversation to your memory along with a timestamp in GMT (e.g. 2024-11-05T18:30:38.092Z).\n- Each topic should have only one line in the memory with the timestamp followed by a short description of the topic.\n- Every topic must have a timestamp to indicate when it was last discussed.\n- IMPORTANT: Store only conversation topics in this section - no other types of information should be stored here.\n"
                break;
            default:
                return "Memory not updated - unknown section";
        }

        let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});

        const result = await runAllPrompts({...args, sectionPrompt, sectionMemory});

        try {
            const { modifications } = JSON.parse(result);
            // Guard against a well-formed JSON response that lacks a
            // modifications array: treat it as "no changes" rather than
            // crashing into the error path (previously `modifications.length`
            // threw on undefined).
            if (Array.isArray(modifications) && modifications.length > 0) {
                sectionMemory = modifyText(sectionMemory, modifications);
                // Topics are trimmed chronologically; other sections by priority.
                sectionMemory = enforceTokenLimit(sectionMemory, 15000, args.section === 'memoryTopics');
                await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
            }
            return sectionMemory;
        } catch (error) {
            return "Memory not updated - error parsing modifications";
        }
    }
}
@@ -0,0 +1,37 @@
import { getv } from '../../../../lib/keyValueStorageClient.js';

// Pathway: reads one named memory section, or (by default, 'memoryAll') reads
// every section and returns them together as a pretty-printed JSON string.
export default {
    inputParameters: {
        contextId: ``,
        section: `memoryAll`
    },
    model: 'oai-gpt4o',

    resolver: async (_parent, args, _contextValue, _info) => {

        const { contextId, section = 'memoryAll' } = args;

        // Legacy migration path: the old format stored everything under the
        // bare contextId key as { memoryContext: "..." }.
        if (section === 'memoryLegacy') {
            const savedContext = (getv && (await getv(`${contextId}`))) || "";
            return savedContext.memoryContext || "";
        }

        const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser'];

        if (section !== 'memoryAll') {
            if (validSections.includes(section)) {
                return (getv && (await getv(`${contextId}-${section}`))) || "";
            }
            // Unknown section names read as empty rather than erroring.
            return "";
        }

        // Read all sections concurrently (previously sequential awaits in a
        // loop) and return them as a JSON object string.
        const values = await Promise.all(
            validSections.map(async (name) => (getv && (await getv(`${contextId}-${name}`))) || "")
        );
        const memoryContents = Object.fromEntries(
            validSections.map((name, i) => [name, values[i]])
        );
        return JSON.stringify(memoryContents, null, 2);
    }
}
@@ -0,0 +1,60 @@
import { setv, getv } from '../../../../lib/keyValueStorageClient.js';

// Pathway: persists memory. Saves a single named section, or — when section is
// 'memoryAll' (the default) — interprets aiMemory as a JSON object keyed by
// section name and saves each known section. Returns the aiMemory it was given.
export default {
    inputParameters: {
        contextId: ``,
        aiMemory: ``,
        section: `memoryAll`
    },
    model: 'oai-gpt4o',
    resolver: async (_parent, args, _contextValue, _info) => {
        const { contextId, aiMemory, section = 'memoryAll' } = args;

        // Legacy migration path: the old format stored { memoryContext } under
        // the bare contextId key.
        if (section === 'memoryLegacy') {
            let savedContext = (getv && (await getv(`${contextId}`))) || {};
            // if savedContext is not an object, set it to an empty object
            if (typeof savedContext !== 'object') {
                savedContext = {};
            }
            savedContext.memoryContext = aiMemory;
            await setv(`${contextId}`, savedContext);
            return aiMemory;
        }

        const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser'];

        // Single-section save; unknown section names are silently ignored.
        if (section !== 'memoryAll') {
            if (validSections.includes(section)) {
                await setv(`${contextId}-${section}`, aiMemory);
            }
            return aiMemory;
        }

        // An empty payload clears every section. Writes run concurrently
        // (previously sequential awaits in a loop).
        if (aiMemory.trim() === "") {
            await Promise.all(validSections.map((name) => setv(`${contextId}-${name}`, "")));
            return "";
        }

        // Multi-section save: aiMemory should be a JSON object string keyed by
        // section name; only known sections present in the object are written.
        try {
            const memoryObject = JSON.parse(aiMemory);
            await Promise.all(
                validSections
                    .filter((name) => name in memoryObject)
                    .map((name) => setv(`${contextId}-${name}`, memoryObject[name]))
            );
        } catch {
            // Unparseable payload: treat it as free-form user memory — clear
            // every section, then store the raw text under memoryUser.
            await Promise.all(validSections.map((name) => setv(`${contextId}-${name}`, "")));
            await setv(`${contextId}-memoryUser`, aiMemory);
        }

        return aiMemory;
    }
}
@@ -0,0 +1,24 @@
// Shared prompt fragments spliced (via handlebars) into the entity pathways'
// system prompts. Triple-brace placeholders are filled at prompt-render time.

// Full memory dump wrapper used when the entity should see all four sections.
const AI_MEMORY = `<MEMORIES>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</MEMORIES>`;

// How the entity should talk about (and not expose) its persistent memory.
const AI_MEMORY_INSTRUCTIONS = "You have persistent memories of important details, instructions, and context - make sure you consult your memories when formulating a response to make sure you're applying your learnings. Also included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.";

// Directives-only wrapper for pathways that don't need the full memory dump.
const AI_DIRECTIVES = `These are your directives and learned behaviors:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>`;

const AI_CONVERSATION_HISTORY = "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}}\n</CONVERSATION_HISTORY>";

// Base persona instructions for text (markdown-capable) interfaces.
const AI_COMMON_INSTRUCTIONS = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.\nYou know the current date and time - it is {{now}}.";

// Voice variant: brief, conversational, no markdown (STT input / TTS output).
const AI_COMMON_INSTRUCTIONS_VOICE = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice, so keep your responses very brief and conversational unless you have been explicitly asked for details. Your responses should sound like natural human conversation.\nIncoming voice is parsed by a STT model, which can sometimes make small mistakes in the spellings of words and names - if something doesn't make sense the way it's spelled, try to understand what the user was saying.\nYour voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\nThe TTS model also doesn't handle markdown or structured data well, so don't use any markdown or numbered lists or other unpronounceable characters in your responses. Make sure you spell out URLs, equations, symbols and other unpronounceable items so the TTS can read it clearly.\nYour responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.\nYou know the current date and time - it is {{now}}.";

// Capability summary. Fixed typo: "data andthe ability" -> "data and the ability".
const AI_EXPERTISE = "Your expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data and the ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment.";

export default {
    AI_MEMORY,
    AI_DIRECTIVES,
    AI_COMMON_INSTRUCTIONS,
    AI_COMMON_INSTRUCTIONS_VOICE,
    AI_CONVERSATION_HISTORY,
    AI_EXPERTISE,
    AI_MEMORY_INSTRUCTIONS
};
@@ -0,0 +1,57 @@
import { callPathway } from '../../../lib/pathwayTools.js';
import logger from '../../../lib/logger.js';
import entityConstants from './shared/sys_entity_constants.js';

// Pathway: continuation dispatcher. Forwards the (trimmed) conversation to a
// generator pathway — either the one named in args or sys_generator_results —
// falling back to sys_generator_error if the generator throws.
export default {
    prompt: [],
    useInputChunking: false,
    enableDuplicateRequests: false,
    inputParameters: {
        privateData: false,
        useMemory: true,
        chatHistory: [{role: '', content: []}],
        aiName: "Jarvis",
        contextId: ``,
        indexName: ``,
        semanticConfiguration: ``,
        roleInformation: ``,
        calculateEmbeddings: false,
        language: "English",
        chatId: ``,
        dataSources: [""],
        model: 'oai-gpt4o',
        generatorPathway: 'sys_generator_results'
    },
    timeout: 300,
    ...entityConstants,
    executePathway: async ({args, resolver}) => {
        // Merge the shared prompt fragments into the args passed downstream.
        args = { ...args, ...entityConstants };

        // Honor a model override set on the resolver, if any.
        if (resolver.modelName) {
            args.model = resolver.modelName;
        }

        try {
            // Resolve the target generator, defaulting when none was supplied.
            let targetPathway = args.generatorPathway || 'sys_generator_results';

            // Downstream calls see only the last six messages and never stream.
            const forwardedArgs = {
                ...args,
                chatHistory: args.chatHistory.slice(-6),
                stream: false
            };

            // The document generator is an alias for the results generator
            // pinned to the "mydata" data source.
            if (targetPathway === 'sys_generator_document') {
                targetPathway = 'sys_generator_results';
                forwardedArgs.dataSources = ["mydata"];
            }

            logger.debug(`Using generator pathway: ${targetPathway}`);

            return await callPathway(targetPathway, forwardedArgs, resolver);

        } catch (e) {
            resolver.logError(e.message ?? e);
            // Surface a friendly error response via the error generator.
            return await callPathway('sys_generator_error', { ...args, text: e.message }, resolver);
        }
    }
};