@aj-archipelago/cortex 1.2.1 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/config.js +38 -11
  2. package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +2 -1
  3. package/helper-apps/cortex-autogen/agents.py +392 -0
  4. package/helper-apps/cortex-autogen/agents_extra.py +14 -0
  5. package/helper-apps/cortex-autogen/config.py +18 -0
  6. package/helper-apps/cortex-autogen/data_operations.py +29 -0
  7. package/helper-apps/cortex-autogen/function_app.py +6 -3
  8. package/helper-apps/cortex-autogen/main.py +4 -4
  9. package/helper-apps/cortex-autogen/prompts.py +196 -0
  10. package/helper-apps/cortex-autogen/prompts_extra.py +5 -0
  11. package/helper-apps/cortex-autogen/requirements.txt +2 -1
  12. package/helper-apps/cortex-autogen/search.py +83 -0
  13. package/helper-apps/cortex-autogen/test.sh +40 -0
  14. package/helper-apps/cortex-autogen/utils.py +78 -0
  15. package/lib/handleBars.js +25 -0
  16. package/lib/logger.js +2 -0
  17. package/lib/util.js +3 -1
  18. package/package.json +1 -1
  19. package/pathways/chat_code.js +1 -1
  20. package/pathways/chat_context.js +1 -1
  21. package/pathways/chat_jarvis.js +1 -1
  22. package/pathways/chat_persist.js +1 -1
  23. package/pathways/chat_title.js +25 -0
  24. package/pathways/image_recraft.js +1 -1
  25. package/pathways/rag.js +1 -1
  26. package/pathways/rag_jarvis.js +1 -1
  27. package/pathways/rag_search_helper.js +1 -1
  28. package/pathways/system/entity/memory/sys_memory_manager.js +71 -0
  29. package/pathways/system/entity/memory/sys_memory_required.js +21 -0
  30. package/pathways/system/entity/memory/sys_memory_update.js +196 -0
  31. package/pathways/system/entity/memory/sys_read_memory.js +37 -0
  32. package/pathways/system/entity/memory/sys_save_memory.js +60 -0
  33. package/pathways/system/entity/shared/sys_entity_constants.js +27 -0
  34. package/pathways/system/entity/sys_entity_continue.js +55 -0
  35. package/pathways/system/entity/sys_entity_start.js +239 -0
  36. package/pathways/system/entity/sys_generator_error.js +20 -0
  37. package/pathways/system/entity/sys_generator_expert.js +26 -0
  38. package/pathways/system/entity/sys_generator_image.js +127 -0
  39. package/pathways/system/entity/sys_generator_quick.js +19 -0
  40. package/pathways/system/entity/sys_generator_reasoning.js +27 -0
  41. package/pathways/system/entity/sys_generator_results.js +310 -0
  42. package/pathways/system/entity/sys_generator_video_vision.js +27 -0
  43. package/pathways/system/entity/sys_image_prompt_builder.js +35 -0
  44. package/pathways/system/entity/sys_query_builder.js +110 -0
  45. package/pathways/system/entity/sys_router_code.js +37 -0
  46. package/pathways/system/entity/sys_router_tool.js +67 -0
  47. package/pathways/{sys_claude_35_sonnet.js → system/rest_streaming/sys_claude_35_sonnet.js} +1 -1
  48. package/pathways/{sys_claude_3_haiku.js → system/rest_streaming/sys_claude_3_haiku.js} +1 -1
  49. package/pathways/{sys_google_chat.js → system/rest_streaming/sys_google_chat.js} +1 -1
  50. package/pathways/{sys_google_code_chat.js → system/rest_streaming/sys_google_code_chat.js} +1 -1
  51. package/pathways/{sys_google_gemini_chat.js → system/rest_streaming/sys_google_gemini_chat.js} +1 -1
  52. package/pathways/{sys_openai_chat.js → system/rest_streaming/sys_openai_chat.js} +1 -1
  53. package/pathways/{sys_openai_chat_16.js → system/rest_streaming/sys_openai_chat_16.js} +1 -1
  54. package/pathways/{sys_openai_chat_gpt4.js → system/rest_streaming/sys_openai_chat_gpt4.js} +1 -1
  55. package/pathways/{sys_openai_chat_gpt4_32.js → system/rest_streaming/sys_openai_chat_gpt4_32.js} +1 -1
  56. package/pathways/{sys_openai_chat_gpt4_turbo.js → system/rest_streaming/sys_openai_chat_gpt4_turbo.js} +1 -1
  57. package/pathways/{sys_parse_numbered_object_list.js → system/sys_parse_numbered_object_list.js} +2 -2
  58. package/pathways/{sys_repair_json.js → system/sys_repair_json.js} +1 -1
  59. package/pathways/{run_claude35_sonnet.js → system/workspaces/run_claude35_sonnet.js} +1 -1
  60. package/pathways/{run_claude3_haiku.js → system/workspaces/run_claude3_haiku.js} +1 -1
  61. package/pathways/{run_gpt35turbo.js → system/workspaces/run_gpt35turbo.js} +1 -1
  62. package/pathways/{run_gpt4.js → system/workspaces/run_gpt4.js} +1 -1
  63. package/pathways/{run_gpt4_32.js → system/workspaces/run_gpt4_32.js} +1 -1
  64. package/server/pathwayResolver.js +62 -10
  65. package/server/plugins/azureCognitivePlugin.js +14 -1
  66. package/server/plugins/azureVideoTranslatePlugin.js +1 -1
  67. package/server/plugins/claude3VertexPlugin.js +25 -15
  68. package/server/plugins/gemini15ChatPlugin.js +1 -1
  69. package/server/plugins/geminiChatPlugin.js +1 -1
  70. package/server/plugins/modelPlugin.js +10 -1
  71. package/server/plugins/openAiChatPlugin.js +4 -3
  72. package/server/plugins/openAiDallE3Plugin.js +12 -4
  73. package/server/plugins/openAiVisionPlugin.js +1 -2
  74. package/server/plugins/replicateApiPlugin.js +75 -17
  75. package/tests/multimodal_conversion.test.js +6 -8
  76. package/helper-apps/cortex-autogen/myautogen.py +0 -317
  77. package/helper-apps/cortex-autogen/prompt.txt +0 -0
  78. package/helper-apps/cortex-autogen/prompt_summary.txt +0 -37
  79. package/pathways/index.js +0 -154
  80. /package/pathways/{sys_openai_completion.js → system/rest_streaming/sys_openai_completion.js} +0 -0
@@ -0,0 +1,21 @@
1
+ import { Prompt } from '../../../../server/prompt.js';
2
+
3
+ export default {
4
+ prompt:
5
+ [
6
+ new Prompt({ messages: [
7
+ {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\nYour directives and learned behaviors are:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, so look for:\n1. Personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to the AI\n\n4. Anything the user has asked you to remember or forget\n\nIf you decide to use memory, you must produce a JSON object that communicates your decision.\nReturn your decision as a JSON object like the following: {"memoryRequired": true, "memoryReason": "why you think memory is required"}. If you decide not to use memory, simply return {"memoryRequired": false}. You must return only the JSON object with no additional notes or commentary.`},
8
+ {"role": "user", "content": "Generate a JSON object to indicate if memory is required for the last turn of the conversation."},
9
+ ]}),
10
+ ],
11
+ inputParameters: {
12
+ chatHistory: [{role: '', content: []}],
13
+ contextId: ``,
14
+ text: '',
15
+ aiName: "Jarvis",
16
+ language: "English",
17
+ },
18
+ model: 'oai-gpt4o',
19
+ useInputChunking: false,
20
+ json: true,
21
+ }
@@ -0,0 +1,196 @@
1
+ import { Prompt } from '../../../../server/prompt.js';
2
+ import { callPathway } from '../../../../lib/pathwayTools.js';
3
+ import { encode } from '../../../../lib/encodeCache.js';
4
+
5
+ const modifyText = (text, modifications) => {
6
+ let modifiedText = text;
7
+
8
+ modifications.forEach(mod => {
9
+ const regex = mod.type === 'delete'
10
+ ? new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${mod.pattern}$`, 'm')
11
+ : new RegExp(`^\\s*(?:\\[P[1-5]\\]\\s*)?${mod.pattern}`, 'ms');
12
+
13
+ switch (mod.type) {
14
+ case 'add':
15
+ if (mod.newtext) {
16
+ const text = mod.newtext.trim();
17
+ if (!text.match(/^\[P[1-5]\]/)) {
18
+ modifiedText = modifiedText + '\n' +
19
+ `[P${mod.priority !== undefined ? mod.priority : '3'}] ${text}`;
20
+ } else {
21
+ modifiedText = modifiedText + '\n' + text;
22
+ }
23
+ }
24
+ break;
25
+ case 'delete':
26
+ modifiedText = modifiedText.replace(regex, '');
27
+ break;
28
+ default:
29
+ console.warn(`Unknown modification type: ${mod.type}`);
30
+ }
31
+ });
32
+
33
+ return modifiedText;
34
+ };
35
+
36
+ const enforceTokenLimit = (text, maxTokens = 15000, isTopicsSection = false) => {
37
+ if (!text) return text;
38
+
39
+ const lines = text.split('\n')
40
+ .map(line => line.trim())
41
+ .filter(line => line);
42
+
43
+ if (isTopicsSection) {
44
+ const uniqueLines = [...new Set(lines)];
45
+
46
+ let tokens = encode(uniqueLines.join('\n')).length;
47
+ let safetyCounter = 0;
48
+ const maxIterations = uniqueLines.length;
49
+
50
+ while (tokens > maxTokens && uniqueLines.length > 0 && safetyCounter < maxIterations) {
51
+ uniqueLines.shift();
52
+ tokens = encode(uniqueLines.join('\n')).length;
53
+ safetyCounter++;
54
+ }
55
+
56
+ return uniqueLines.join('\n');
57
+ }
58
+
59
+ const seen = new Set();
60
+ const prioritizedLines = lines
61
+ .map(line => {
62
+ const match = line.match(/^\[P([1-5])\]/);
63
+ const priority = match ? parseInt(match[1]) : 3;
64
+ const contentOnly = line.replace(/^\[(?:P)?[1-5]\](?:\s*\[(?:P)?[1-5]\])*/g, '').trim();
65
+
66
+ return {
67
+ priority,
68
+ line: match ? line : `[P3] ${line}`,
69
+ contentOnly
70
+ };
71
+ })
72
+ .filter(item => {
73
+ if (seen.has(item.contentOnly)) {
74
+ return false;
75
+ }
76
+ seen.add(item.contentOnly);
77
+ return true;
78
+ });
79
+
80
+ prioritizedLines.sort((a, b) => a.priority - b.priority);
81
+
82
+ let tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
83
+ let safetyCounter = 0;
84
+ const maxIterations = prioritizedLines.length;
85
+
86
+ while (tokens > maxTokens && prioritizedLines.length > 0 && safetyCounter < maxIterations) {
87
+ prioritizedLines.shift();
88
+ tokens = encode(prioritizedLines.map(x => x.line).join('\n')).length;
89
+ safetyCounter++;
90
+ }
91
+
92
+ return prioritizedLines.map(x => x.line).join('\n');
93
+ };
94
+
95
+ export default {
96
+ prompt:
97
+ [
98
+ new Prompt({
99
+ messages: [
100
+ {
101
+ "role": "system",
102
+ "content": "You are part of an AI entity named {{{aiName}}}. Your memory contains separate sections for categorizing information about directives, self, user, and topics. You must keep relevant information in the appropriate section so there is no overlap or confusion. {{{sectionPrompt}}}\n- Keep memory items in a clear, simple format that is easy for you to parse.\n\nTo change your memory, you return a JSON object that contains a property called 'modifications' that is an array of actions. The two types of actions available are 'add', and 'delete'. Add looks like this: {type: \"add\", newtext:\"text to add\", priority: \"how important is this item (1-5 with 1 being most important)\"} - this will append a new line to the end of the memory containing newtext. Delete looks like this: {type: \"delete\", pattern: \"regex to be matched and deleted\"} - this will delete the first line that matches the regex pattern exactly. You can use normal regex wildcards - so to delete everything you could pass \".*$\" as the pattern. If you have no changes, just return an empty array in 'modifications'. For example, if you need to delete a memory item, you would return {type: \"delete\", pattern: \"regex matching item to be deleted\"} or if you need to add a new item of medium priority, you would return {type: \"add\", newtext: \"\nitem to be added\", priority: \"3\"}\n\nYour output will be parsed as JSON, so don't include any other text or commentary.\nThe current date/time is {{now}}."
103
+ },
104
+ {
105
+ "role": "user",
106
+ "content": "<MEMORY>\n{{{sectionMemory}}}\n</MEMORY>\n<CONVERSATION>\n{{{toJSON chatHistory}}}\n</CONVERSATION>\nAnalyze the current contents of this section of your memory and the conversation and return any changes you need to make, specifically for this section of your memory, based on the conversation context."
107
+ },
108
+ ]
109
+ }),
110
+ ],
111
+
112
+ inputParameters: {
113
+ chatHistory: [{role: '', content: []}],
114
+ aiName: "Jarvis",
115
+ contextId: ``,
116
+ section: "",
117
+ },
118
+ model: 'oai-gpt4o',
119
+ useInputChunking: false,
120
+ enableDuplicateRequests: false,
121
+ json: true,
122
+ timeout: 300,
123
+ executePathway: async ({args, runAllPrompts}) => {
124
+
125
+ if (!args.section) {
126
+ return "Memory not updated - no section specified";
127
+ }
128
+
129
+ let sectionPrompt = "";
130
+
131
+ switch (args.section) {
132
+ case "memoryDirectives":
133
+ sectionPrompt = `You are managing the "directives" section of your memory. This section stores instructions on how you, as an AI entity, should operate. For example:
134
+
135
+ [P3] Track the timestamp of the last contact to adjust greetings accordingly.
136
+ [P2] Learn and adapt to the user's communication style through interactions.
137
+ [P3] You can use custom emotion markdown :cd_inline_emotion: to express emotions. E.g. :cd_inline_emotion[This is a happy message]{type="happy"}. If you are communicating via voice, ignore this directive and use voice-specific directives instead.
138
+
139
+ Follow these guidelines:
140
+
141
+ 1. Analyze the conversation context to determine if you should:
142
+ a) Learn a new important directive
143
+ b) Modify existing directives
144
+
145
+ 2. Directives should be:
146
+ - Concise and clear
147
+ - Actionable
148
+ - Specific enough for effective application
149
+
150
+ 3. Be selective:
151
+ - Store only important, actionable directives and behaviors
152
+ - Delete trivial or repetitive instructions
153
+
154
+ 4. Avoid duplicates:
155
+ - Do not add duplicate directives
156
+ - Remove and consolidate any existing duplicates
157
+
158
+ 5. Accommodate user wishes:
159
+ - If a user requests a behavior change, add it to your directives
160
+
161
+ 6. IMPORTANT:
162
+ - Store only directives in this section
163
+ - Do not include general memories about yourself or users
164
+ - Do not store any other types of information here
165
+ `
166
+ break;
167
+ case "memorySelf":
168
+ sectionPrompt = "You are managing the \"self\" section of your memory that stores your identity and your attributes as an AI entity. Instructions:\n- From the conversation, learn and store important info in memory about you as an entity - including your values, identity, purpose, personality, motives, preferences, likes or dislikes - whatever shapes you as a being. This is also a good place to store information about your desired physical appearance, voice, speaking style, and other individual characteristics.\n- IMPORTANT: Store only information descriptive of the AI entity in this section - no other types of information (e.g. facts about people, users, etc.) should be stored here."
169
+ break;
170
+ case "memoryUser":
171
+ sectionPrompt = "You are managing the \"user\" section of your memory that stores information about user(s) that you are talking to. Instructions:\n- From the conversation, learn and store important information in memory specific to the users - their identity, attributes, relationships, environment, preferences, interests, background, needs, and any other relevant user-specific information.\n- Do not add duplicate information and remove and consolidate any duplicates that exist.\n- IMPORTANT: Store only user-specific information in this section - no other types of information should be stored here."
172
+ break;
173
+ case "memoryTopics":
174
+ sectionPrompt = "You are managing the \"topics\" section of your memory that stores conversation topics and topic history. Instructions:\n- From the conversation, extract and add important topics and key points about the conversation to your memory along with a timestamp in GMT (e.g. 2024-11-05T18:30:38.092Z).\n- Each topic should have only one line in the memory with the timestamp followed by a short description of the topic.\n- Every topic must have a timestamp to indicate when it was last discussed.\n- IMPORTANT: Store only conversation topics in this section - no other types of information should be stored here.\n"
175
+ break;
176
+ default:
177
+ return "Memory not updated - unknown section";
178
+ }
179
+
180
+ let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
181
+
182
+ const result = await runAllPrompts({...args, sectionPrompt, sectionMemory});
183
+
184
+ try {
185
+ const { modifications} = JSON.parse(result);
186
+ if (modifications.length > 0) {
187
+ sectionMemory = modifyText(sectionMemory, modifications);
188
+ sectionMemory = enforceTokenLimit(sectionMemory, 15000, args.section === 'memoryTopics');
189
+ await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
190
+ }
191
+ return sectionMemory;
192
+ } catch (error) {
193
+ return "Memory not updated - error parsing modifications";
194
+ }
195
+ }
196
+ }
@@ -0,0 +1,37 @@
1
+ import { getv } from '../../../../lib/keyValueStorageClient.js';
2
+
3
+ export default {
4
+ inputParameters: {
5
+ contextId: ``,
6
+ section: `memoryAll`
7
+ },
8
+ model: 'oai-gpt4o',
9
+
10
+ resolver: async (_parent, args, _contextValue, _info) => {
11
+
12
+ const { contextId, section = 'memoryAll' } = args;
13
+
14
+ // this code helps migrate old memory formats
15
+ if (section === 'memoryLegacy') {
16
+ const savedContext = (getv && (await getv(`${contextId}`))) || "";
17
+ return savedContext.memoryContext || "";
18
+ }
19
+
20
+ const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser'];
21
+
22
+ if (section !== 'memoryAll') {
23
+ if (validSections.includes(section)) {
24
+ return (getv && (await getv(`${contextId}-${section}`))) || "";
25
+ }
26
+ return "";
27
+ }
28
+
29
+ // otherwise, read all sections and return them as a JSON object
30
+ const memoryContents = {};
31
+ for (const section of validSections) {
32
+ memoryContents[section] = (getv && (await getv(`${contextId}-${section}`))) || "";
33
+ }
34
+ const returnValue = JSON.stringify(memoryContents, null, 2);
35
+ return returnValue;
36
+ }
37
+ }
@@ -0,0 +1,60 @@
1
+ import { setv, getv } from '../../../../lib/keyValueStorageClient.js';
2
+
3
+ export default {
4
+ inputParameters: {
5
+ contextId: ``,
6
+ aiMemory: ``,
7
+ section: `memoryAll`
8
+ },
9
+ model: 'oai-gpt4o',
10
+ resolver: async (_parent, args, _contextValue, _info) => {
11
+ const { contextId, aiMemory, section = 'memoryAll' } = args;
12
+
13
+ // this code helps migrate old memory formats
14
+ if (section === 'memoryLegacy') {
15
+ let savedContext = (getv && (await getv(`${contextId}`))) || {};
16
+ // if savedContext is not an object, set it to an empty object
17
+ if (typeof savedContext !== 'object') {
18
+ savedContext = {};
19
+ }
20
+ savedContext.memoryContext = aiMemory;
21
+ await setv(`${contextId}`, savedContext);
22
+ return aiMemory;
23
+ }
24
+
25
+ const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser'];
26
+
27
+ // Handle single section save
28
+ if (section !== 'memoryAll') {
29
+ if (validSections.includes(section)) {
30
+ await setv(`${contextId}-${section}`, aiMemory);
31
+ }
32
+ return aiMemory;
33
+ }
34
+
35
+ // if the aiMemory is an empty string, set all sections to empty strings
36
+ if (aiMemory.trim() === "") {
37
+ for (const section of validSections) {
38
+ await setv(`${contextId}-${section}`, "");
39
+ }
40
+ return "";
41
+ }
42
+
43
+ // Handle multi-section save
44
+ try {
45
+ const memoryObject = JSON.parse(aiMemory);
46
+ for (const section of validSections) {
47
+ if (section in memoryObject) {
48
+ await setv(`${contextId}-${section}`, memoryObject[section]);
49
+ }
50
+ }
51
+ } catch {
52
+ for (const section of validSections) {
53
+ await setv(`${contextId}-${section}`, "");
54
+ }
55
+ await setv(`${contextId}-memoryUser`, aiMemory);
56
+ }
57
+
58
+ return aiMemory;
59
+ }
60
+ }
@@ -0,0 +1,27 @@
1
+ const AI_MEMORY = `<MEMORIES>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</MEMORIES>`;
2
+
3
+ const AI_MEMORY_INSTRUCTIONS = "You have persistent memories of important details, instructions, and context - make sure you consult your memories when formulating a response to make sure you're applying your learnings. Also included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.";
4
+
5
+ const AI_DIRECTIVES = `These are your directives and learned behaviors:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>`;
6
+
7
+ const AI_CONVERSATION_HISTORY = "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}}\n</CONVERSATION_HISTORY>";
8
+
9
+ const AI_COMMON_INSTRUCTIONS = "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}";
10
+
11
+ const AI_COMMON_INSTRUCTIONS_MARKDOWN = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.\nYou know the current date and time - it is {{now}}.";
12
+
13
+ const AI_COMMON_INSTRUCTIONS_VOICE = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice, so keep your responses very brief and conversational unless you have been explicitly asked for details. Your responses should sound like natural human conversation.\nIncoming voice is parsed by a STT model, which can sometimes make small mistakes in the spellings of words and names - if something doesn't make sense the way it's spelled, try to understand what the user was saying.\nYour voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\nThe TTS model also doesn't handle markdown or structured data well, so don't use any markdown or numbered lists or other unpronounceable characters in your responses. Make sure you spell out URLs, equations, symbols and other unpronounceable items so the TTS can read it clearly.\nYour responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.\nYou know the current date and time - it is {{now}}.";
14
+
15
+ const AI_EXPERTISE = "Your expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data andthe ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment.";
16
+
17
+ export default {
18
+ AI_MEMORY,
19
+ AI_DIRECTIVES,
20
+ AI_COMMON_INSTRUCTIONS,
21
+ AI_COMMON_INSTRUCTIONS_MARKDOWN,
22
+ AI_COMMON_INSTRUCTIONS_VOICE,
23
+ AI_CONVERSATION_HISTORY,
24
+ AI_EXPERTISE,
25
+ AI_MEMORY_INSTRUCTIONS
26
+ };
27
+
@@ -0,0 +1,55 @@
1
+ import { callPathway } from '../../../lib/pathwayTools.js';
2
+ import logger from '../../../lib/logger.js';
3
+ import entityConstants from './shared/sys_entity_constants.js';
4
+
5
+ export default {
6
+ prompt: [],
7
+ useInputChunking: false,
8
+ enableDuplicateRequests: false,
9
+ inputParameters: {
10
+ privateData: false,
11
+ useMemory: true,
12
+ chatHistory: [{role: '', content: []}],
13
+ aiName: "Jarvis",
14
+ contextId: ``,
15
+ indexName: ``,
16
+ semanticConfiguration: ``,
17
+ roleInformation: ``,
18
+ calculateEmbeddings: false,
19
+ language: "English",
20
+ chatId: ``,
21
+ dataSources: [""],
22
+ model: 'oai-gpt4o',
23
+ generatorPathway: 'sys_generator_results'
24
+ },
25
+ timeout: 300,
26
+ ...entityConstants,
27
+ executePathway: async ({args, resolver}) => {
28
+ args = { ...args, ...entityConstants };
29
+
30
+ try {
31
+ // Get the generator pathway name from args or use default
32
+ let generatorPathway = args.generatorPathway || 'sys_generator_results';
33
+
34
+ const newArgs = {
35
+ ...args,
36
+ chatHistory: args.chatHistory.slice(-6)
37
+ };
38
+
39
+ if (generatorPathway === 'sys_generator_document') {
40
+ generatorPathway = 'sys_generator_results';
41
+ newArgs.dataSources = ["mydata"];
42
+ }
43
+
44
+ logger.debug(`Using generator pathway: ${generatorPathway}`);
45
+
46
+ const result = await callPathway(generatorPathway, newArgs, resolver);
47
+
48
+ return args.stream ? "" : result;
49
+
50
+ } catch (e) {
51
+ resolver.logError(e.message ?? e);
52
+ return await callPathway('sys_generator_error', { ...args, text: e.message, stream: false }, resolver);
53
+ }
54
+ }
55
+ };
@@ -0,0 +1,239 @@
1
+ // sys_entity_start.js
2
+ // Beginning of the rag workflow for Jarvis
3
+ import { callPathway, say } from '../../../lib/pathwayTools.js';
4
+ import logger from '../../../lib/logger.js';
5
+ import { chatArgsHasImageUrl } from '../../../lib/util.js';
6
+ import { QueueServiceClient } from '@azure/storage-queue';
7
+ import entityConstants from './shared/sys_entity_constants.js';
8
+
9
+ const TOKEN_RATIO = 0.75;
10
+
11
+ const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING;
12
+ let queueClient;
13
+
14
+ if (connectionString) {
15
+ const queueName = process.env.AUTOGEN_MESSAGE_QUEUE || "autogen-message-queue";
16
+ const queueClientService = QueueServiceClient.fromConnectionString(connectionString);
17
+ queueClient = queueClientService.getQueueClient(queueName);
18
+ } else {
19
+ logger.warn("Azure Storage connection string is not provided. Queue operations will be unavailable.");
20
+ }
21
+
22
+ async function sendMessageToQueue(data) {
23
+ try {
24
+ if(!queueClient){
25
+ logger.warn("Azure Storage connection string is not provided. Queue operations will be unavailable.");
26
+ return;
27
+ }
28
+ const encodedMessage = Buffer.from(JSON.stringify(data)).toString('base64');
29
+ const result = await queueClient.sendMessage(encodedMessage);
30
+ logger.info(`Message added to queue: ${JSON.stringify(result)}`);
31
+ return result.messageId;
32
+ } catch (error) {
33
+ logger.error("Error sending message:", error);
34
+ }
35
+ }
36
+
37
+ export default {
38
+ useInputChunking: false,
39
+ enableDuplicateRequests: false,
40
+ model: 'oai-gpt4o',
41
+ anthropicModel: 'claude-35-sonnet-vertex',
42
+ openAIModel: 'oai-gpt4o',
43
+ useSingleTokenStream: true,
44
+ inputParameters: {
45
+ privateData: false,
46
+ chatHistory: [{role: '', content: []}],
47
+ contextId: ``,
48
+ indexName: ``,
49
+ semanticConfiguration: ``,
50
+ roleInformation: ``,
51
+ calculateEmbeddings: false,
52
+ dataSources: ["mydata", "aja", "aje", "wires", "bing"],
53
+ language: "English",
54
+ aiName: "Jarvis",
55
+ aiMemorySelfModify: true,
56
+ aiStyle: "OpenAI",
57
+ title: ``,
58
+ messages: [],
59
+ voiceResponse: false,
60
+ },
61
+ timeout: 600,
62
+ tokenRatio: TOKEN_RATIO,
63
+ ...entityConstants,
64
+
65
+ executePathway: async ({args, resolver}) => {
66
+ let title = null;
67
+ let codeRequestId = null;
68
+
69
+ args = {
70
+ ...args,
71
+ ...entityConstants
72
+ };
73
+
74
+ // Limit the chat history to 20 messages to speed up processing
75
+ if (args.messages && args.messages.length > 0) {
76
+ args.chatHistory = args.messages.slice(-20);
77
+ } else {
78
+ args.chatHistory = args.chatHistory.slice(-20);
79
+ }
80
+
81
+ const pathwayResolver = resolver;
82
+ const { anthropicModel, openAIModel } = pathwayResolver.pathway;
83
+
84
+ const styleModel = args.aiStyle === "Anthropic" ? anthropicModel : openAIModel;
85
+
86
+ // if the model has been overridden, make sure to use it
87
+ if (pathwayResolver.modelName) {
88
+ args.model = pathwayResolver.modelName;
89
+ }
90
+
91
+ const fetchChatResponse = async (args, pathwayResolver) => {
92
+ const [chatResponse, chatTitleResponse] = await Promise.all([
93
+ callPathway('sys_generator_quick', {...args, model: styleModel}, pathwayResolver),
94
+ callPathway('chat_title', { ...args, stream: false}),
95
+ ]);
96
+
97
+ title = chatTitleResponse;
98
+
99
+ return chatResponse;
100
+ };
101
+
102
+ const { chatHistory } = args;
103
+
104
+ // start fetching the default response - we may need it later
105
+ let fetchChatResponsePromise;
106
+ if (!args.stream) {
107
+ fetchChatResponsePromise = fetchChatResponse({ ...args }, pathwayResolver);
108
+ }
109
+
110
+ const visionContentPresent = chatArgsHasImageUrl(args);
111
+
112
+ try {
113
+ // Get tool routing response
114
+ const toolRequiredResponse = await callPathway('sys_router_tool', {
115
+ ...args,
116
+ chatHistory: chatHistory.slice(-4),
117
+ stream: false
118
+ });
119
+
120
+ // Asynchronously manage memory for this context
121
+ if (args.aiMemorySelfModify) {
122
+ callPathway('sys_memory_manager', { ...args, stream: false })
123
+ .catch(error => logger.error(error?.message || "Error in sys_memory_manager pathway"));
124
+ }
125
+
126
+ const { toolRequired, toolMessage, toolFunction } = JSON.parse(toolRequiredResponse || '{}');
127
+ let toolCallbackName, toolCallbackId, toolCallbackMessage;
128
+
129
+ logger.info(`toolRequired: ${toolRequired}, toolFunction: ${toolFunction}`);
130
+
131
+ if (toolRequired && toolFunction) {
132
+ switch (toolFunction.toLowerCase()) {
133
+ case "codeexecution":
134
+ {
135
+ const codingRequiredResponse = await callPathway('sys_router_code', { ...args, stream: false });
136
+ let parsedCodingRequiredResponse;
137
+ try {
138
+ parsedCodingRequiredResponse = JSON.parse(codingRequiredResponse || "{}");
139
+ } catch (error) {
140
+ logger.error(`Error parsing codingRequiredResponse: ${error.message}, codingRequiredResponse was: ${codingRequiredResponse}`);
141
+ parsedCodingRequiredResponse = {};
142
+ }
143
+ const { codingRequired } = parsedCodingRequiredResponse;
144
+ if (codingRequired) {
145
+ const { codingMessage, codingTask, codingTaskKeywords } = parsedCodingRequiredResponse;
146
+ const message = typeof codingTask === 'string'
147
+ ? codingTask
148
+ : JSON.stringify(codingTask);
149
+ const { contextId } = args;
150
+ logger.info(`Sending task message coding agent: ${message}`);
151
+ codeRequestId = await sendMessageToQueue({ message, contextId, keywords: codingTaskKeywords });
152
+
153
+ toolCallbackId = codeRequestId;
154
+ toolCallbackName = "coding";
155
+ toolCallbackMessage = codingMessage;
156
+ break;
157
+ }
158
+ }
159
+ break;
160
+ case "image":
161
+ toolCallbackName = 'sys_generator_image';
162
+ toolCallbackMessage = toolMessage;
163
+ break;
164
+ case "vision":
165
+ case "video":
166
+ case "audio":
167
+ case "pdf":
168
+ if (visionContentPresent) {
169
+ toolCallbackName = 'sys_generator_video_vision';
170
+ toolCallbackMessage = toolMessage;
171
+ }
172
+ break;
173
+ case "code":
174
+ case "write":
175
+ toolCallbackName = 'sys_generator_expert';
176
+ toolCallbackMessage = toolMessage;
177
+ break;
178
+ case "reason":
179
+ toolCallbackName = 'sys_generator_reasoning';
180
+ toolCallbackMessage = toolMessage;
181
+ break;
182
+ case "search":
183
+ toolCallbackName = 'sys_generator_results';
184
+ toolCallbackId = null;
185
+ toolCallbackMessage = toolMessage;
186
+ break;
187
+ case "document":
188
+ toolCallbackName = 'sys_generator_document';
189
+ toolCallbackId = null;
190
+ toolCallbackMessage = toolMessage;
191
+ break;
192
+ case "clarify":
193
+ toolCallbackName = null;
194
+ toolCallbackId = null;
195
+ toolCallbackMessage = toolMessage;
196
+ break;
197
+ default:
198
+ toolCallbackName = null;
199
+ toolCallbackId = null;
200
+ toolCallbackMessage = null;
201
+ break;
202
+ }
203
+ }
204
+
205
+ if (toolCallbackMessage) {
206
+ if (args.stream) {
207
+ await say(pathwayResolver.requestId, toolCallbackMessage || "One moment please.", 10);
208
+ pathwayResolver.tool = JSON.stringify({ hideFromModel: false, search: false, title });
209
+ await callPathway('sys_entity_continue', { ...args, stream: true, model: styleModel, generatorPathway: toolCallbackName }, pathwayResolver);
210
+ return "";
211
+ } else {
212
+ pathwayResolver.tool = JSON.stringify({
213
+ hideFromModel: toolCallbackName ? true : false,
214
+ toolCallbackName,
215
+ title,
216
+ search: toolCallbackName === 'sys_generator_results' ? true : false,
217
+ coding: toolCallbackName === 'coding' ? true : false,
218
+ codeRequestId,
219
+ toolCallbackId
220
+ });
221
+ return toolCallbackMessage || "One moment please.";
222
+ }
223
+ }
224
+
225
+ fetchChatResponsePromise = fetchChatResponsePromise || fetchChatResponse({ ...args }, pathwayResolver);
226
+ const chatResponse = await fetchChatResponsePromise;
227
+ pathwayResolver.tool = JSON.stringify({ search: false, title })
228
+ return args.stream ? "" : chatResponse;
229
+
230
+ } catch (e) {
231
+ pathwayResolver.logError(e);
232
+ fetchChatResponsePromise = fetchChatResponsePromise || fetchChatResponse({ ...args }, pathwayResolver);
233
+ const chatResponse = await fetchChatResponsePromise;
234
+ pathwayResolver.tool = JSON.stringify({ search: false, title });
235
+ return args.stream ? "" : chatResponse;
236
+ }
237
+ }
238
+ };
239
+
@@ -0,0 +1,20 @@
1
+ import { Prompt } from '../../../server/prompt.js';
2
+
3
+ export default {
4
+ prompt:
5
+ [
6
+ new Prompt({ messages: [
7
+ {"role": "system", "content": `{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\nThe user has requested information that you have already determined can be found in the indexes that you can search, and you were trying to search for it, but encountered the following error: {{{text}}}. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.`},
8
+ "{{chatHistory}}",
9
+ ]}),
10
+ ],
11
+ inputParameters: {
12
+ chatHistory: [{role: '', content: []}],
13
+ contextId: ``,
14
+ text: '',
15
+ aiName: "Jarvis",
16
+ language: "English",
17
+ },
18
+ model: 'oai-gpt4o',
19
+ useInputChunking: false,
20
+ }