@aj-archipelago/cortex 1.3.35 → 1.3.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +9 -9
  2. package/config/default.example.json +0 -20
  3. package/config.js +160 -6
  4. package/lib/pathwayTools.js +79 -1
  5. package/lib/requestExecutor.js +3 -1
  6. package/lib/util.js +7 -0
  7. package/package.json +1 -1
  8. package/pathways/basePathway.js +2 -0
  9. package/pathways/call_tools.js +379 -0
  10. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +1 -1
  11. package/pathways/system/entity/memory/sys_search_memory.js +2 -2
  12. package/pathways/system/entity/sys_entity_agent.js +289 -0
  13. package/pathways/system/entity/sys_generator_memory.js +1 -1
  14. package/pathways/system/entity/sys_generator_results.js +1 -1
  15. package/pathways/system/entity/sys_get_entities.js +19 -0
  16. package/pathways/system/entity/tools/shared/sys_entity_tools.js +150 -0
  17. package/pathways/system/entity/tools/sys_tool_bing_search.js +147 -0
  18. package/pathways/system/entity/tools/sys_tool_callmodel.js +62 -0
  19. package/pathways/system/entity/tools/sys_tool_coding.js +53 -0
  20. package/pathways/system/entity/tools/sys_tool_codingagent.js +100 -0
  21. package/pathways/system/entity/tools/sys_tool_cognitive_search.js +231 -0
  22. package/pathways/system/entity/tools/sys_tool_image.js +57 -0
  23. package/pathways/system/entity/tools/sys_tool_readfile.js +119 -0
  24. package/pathways/system/entity/tools/sys_tool_reasoning.js +75 -0
  25. package/pathways/system/entity/tools/sys_tool_remember.js +59 -0
  26. package/pathways/vision.js +1 -1
  27. package/server/modelExecutor.js +4 -12
  28. package/server/pathwayResolver.js +53 -40
  29. package/server/plugins/azureBingPlugin.js +42 -4
  30. package/server/plugins/azureCognitivePlugin.js +40 -12
  31. package/server/plugins/claude3VertexPlugin.js +67 -18
  32. package/server/plugins/modelPlugin.js +3 -2
  33. package/server/plugins/openAiReasoningPlugin.js +3 -3
  34. package/server/plugins/openAiReasoningVisionPlugin.js +48 -0
  35. package/server/plugins/openAiVisionPlugin.js +192 -7
  36. package/tests/agentic.test.js +256 -0
  37. package/tests/call_tools.test.js +216 -0
  38. package/tests/claude3VertexToolConversion.test.js +78 -0
  39. package/tests/mocks.js +11 -3
  40. package/tests/multimodal_conversion.test.js +1 -1
  41. package/tests/openAiToolPlugin.test.js +242 -0
  42. package/pathways/test_palm_chat.js +0 -31
  43. package/server/plugins/palmChatPlugin.js +0 -233
  44. package/server/plugins/palmCodeCompletionPlugin.js +0 -45
  45. package/server/plugins/palmCompletionPlugin.js +0 -135
  46. package/tests/palmChatPlugin.test.js +0 -219
  47. package/tests/palmCompletionPlugin.test.js +0 -58
@@ -0,0 +1,379 @@
1
+ // call_tools.js
2
+ // Uses OpenAI's tool calling API
3
+ import { callPathway, say } from '../lib/pathwayTools.js';
4
+ import logger from '../lib/logger.js';
5
+ import { config } from '../config.js';
6
+ import { Prompt } from '../server/prompt.js';
7
+
8
// Define the available tools in OpenAI's tool calling format.
// Every tool exposes the same single-parameter schema (one required string,
// "detailedInstructions"), so the entries are generated from a compact
// (name, description[, parameter description]) table instead of being
// written out longhand.
const DEFAULT_PARAM_DESCRIPTION = "Detailed instructions about what you need the tool to do";

// Build one OpenAI tool descriptor. paramDescription defaults to the shared
// instruction text; only the Image tool overrides it.
const buildTool = (name, description, paramDescription = DEFAULT_PARAM_DESCRIPTION) => ({
    type: "function",
    function: {
        name,
        description,
        parameters: {
            type: "object",
            properties: {
                detailedInstructions: {
                    type: "string",
                    description: paramDescription
                }
            },
            required: ["detailedInstructions"]
        }
    }
});

const TOOLS = [
    buildTool("SearchMemory", "Use specifically to search your long term memory for information or details that may not be present in your short term memory."),
    buildTool("Search", "Use for current events, news, fact-checking, and information requiring citation. This tool allows you to search the internet, all Al Jazeera news articles and the latest news wires from multiple sources."),
    buildTool("Document", "Access user's personal document index. Use for user-specific uploaded information."),
    buildTool("Write", "Engage for any task related to composing, editing, or refining written content."),
    buildTool("Image", "Use when asked to create, generate, or revise visual content.", "Detailed instructions about the image(s) you want to create"),
    buildTool("Code", "Engage for any programming-related tasks, including creating, modifying, reviewing, or explaining code."),
    buildTool("CodeExecution", "Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks."),
    buildTool("Reason", "Employ for reasoning, scientific analysis, evaluating evidence, strategic planning, problem-solving, logic puzzles, mathematical calculations, or any questions that require careful thought or complex choices."),
    buildTool("PDF", "Use specifically for analyzing and answering questions about PDF file content."),
    buildTool("Vision", "Use specifically for analyzing and answering questions about image files (jpg, gif, bmp, png, etc)."),
    buildTool("Video", "Use specifically for analyzing and answering questions about video or audio file content."),
];
199
// Maps a lowercased tool name to the generator pathway that fulfills it.
// Unknown tools fall through to 'sys_generator_quick'.
const GENERATOR_PATHWAYS = {
    codeexecution: 'sys_router_code',
    image: 'sys_generator_image',
    vision: 'sys_generator_video_vision',
    video: 'sys_generator_video_vision',
    audio: 'sys_generator_video_vision',
    pdf: 'sys_generator_video_vision',
    text: 'sys_generator_video_vision',
    code: 'sys_generator_expert',
    write: 'sys_generator_expert',
    reason: 'sys_generator_reasoning',
    search: 'sys_generator_results',
    document: 'sys_generator_document',
    searchmemory: 'sys_generator_memory',
};

// Safety cap on the model <-> tool round-trip loop: without it the loop runs
// forever if the model keeps requesting tools, or if it returns an empty
// (falsy) content string.
const MAX_TOOL_ITERATIONS = 10;

export default {
    useInputChunking: false,
    enableDuplicateRequests: false,
    useSingleTokenStream: false,
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        language: "English",
        aiName: "Jarvis",
        aiStyle: "OpenAI",
        model: 'oai-gpt41',
    },
    timeout: 600,

    /**
     * Runs an OpenAI tool-calling loop: prompts the model, executes any
     * requested tools via 'sys_entity_continue', feeds the results back, and
     * repeats until the model produces a plain response (or the iteration cap
     * is hit). On error, falls back to 'sys_generator_quick'.
     *
     * @param {object} ctx - pathway context ({args, runAllPrompts, resolver})
     * @returns {Promise<string|null>} the final model response (null when
     *   streaming in the error-fallback path)
     */
    executePathway: async ({args, runAllPrompts, resolver}) => {
        const pathwayResolver = resolver;

        // Merge the shared entity constants into the pathway arguments.
        args = {
            ...args,
            ...config.get('entityConstants')
        };

        // Pick the style model used by the error-fallback path below.
        const { aiStyle, AI_STYLE_ANTHROPIC, AI_STYLE_OPENAI } = args;
        const styleModel = aiStyle === "Anthropic" ? AI_STYLE_ANTHROPIC : AI_STYLE_OPENAI;

        const promptMessages = [
            {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_TOOLS}}\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_MEMORY_DIRECTIVES}}\n{{renderTemplate AI_DATETIME}}`},
            "{{chatHistory}}",
        ];

        pathwayResolver.pathwayPrompt = [
            new Prompt({ messages: promptMessages }),
        ];

        // Initialize chat history if needed
        if (!args.chatHistory || args.chatHistory.length === 0) {
            args.chatHistory = [];
        }

        try {
            const currentMessages = [...args.chatHistory];
            let finalResponse = null;
            let iterations = 0;

            while (finalResponse === null) {
                if (++iterations > MAX_TOOL_ITERATIONS) {
                    throw new Error(`Tool calling loop exceeded ${MAX_TOOL_ITERATIONS} iterations`);
                }

                const response = await runAllPrompts({
                    ...args,
                    chatHistory: currentMessages,
                    tools: TOOLS,
                    tool_choice: "auto",
                    stream: false
                });

                // If response is a string, treat it as the final response
                if (typeof response === 'string') {
                    finalResponse = response;
                    break;
                }

                const toolCalls = response.tool_calls || [];

                if (toolCalls.length === 0) {
                    // No tool calls, this is the final response. Break
                    // explicitly: content may be empty/falsy, and relying on
                    // the loop condition would then spin forever.
                    finalResponse = response.content ?? '';
                    break;
                }

                // Record ONE assistant turn containing ALL tool calls before
                // executing anything, so every subsequent "tool" message has a
                // valid preceding assistant message (required by the OpenAI
                // chat format) — even when a tool's arguments fail to parse.
                currentMessages.push({
                    role: "assistant",
                    content: "",
                    tool_calls: toolCalls.map((toolCall) => ({
                        id: toolCall.id,
                        type: "function",
                        function: {
                            name: toolCall.function.name,
                            arguments: toolCall.function.arguments
                        }
                    }))
                });

                // Execute all tool calls in parallel. The callbacks must NOT
                // mutate currentMessages: parallel pushes interleave
                // non-deterministically and would separate assistant/tool
                // message pairs. Each callback only returns its outcome.
                const toolResults = await Promise.all(toolCalls.map(async (toolCall) => {
                    try {
                        const toolArgs = JSON.parse(toolCall.function.arguments);
                        const toolFunction = toolCall.function.name.toLowerCase();

                        const generatorPathway = GENERATOR_PATHWAYS[toolFunction] || 'sys_generator_quick';
                        const instructions = toolArgs.detailedInstructions || toolArgs.lastUserMessage;

                        // Call sys_entity_continue with the appropriate generator pathway
                        const toolResult = await callPathway('sys_entity_continue', {
                            ...args,
                            chatHistory: [{
                                role: 'user',
                                content: instructions
                            }],
                            generatorPathway,
                            detailedInstructions: instructions
                        }, resolver);

                        return { success: true, result: toolResult, content: JSON.stringify(toolResult) };
                    } catch (error) {
                        logger.error(`Error executing tool ${toolCall.function.name}: ${error.message}`);
                        return { success: false, error: error.message, content: `Error: ${error.message}` };
                    }
                }));

                // Append one tool result message per call, preserving the
                // model's original tool-call order.
                toolCalls.forEach((toolCall, i) => {
                    currentMessages.push({
                        role: "tool",
                        tool_call_id: toolCall.id,
                        name: toolCall.function.name,
                        content: toolResults[i].content
                    });
                });

                // Check if any tool calls failed
                const failedTools = toolResults.filter(result => !result.success);
                if (failedTools.length > 0) {
                    logger.warn(`Some tool calls failed: ${failedTools.map(t => t.error).join(', ')}`);
                }
            }

            // Tool messages are internal bookkeeping; keep only the
            // user/assistant turns in the returned chat history.
            args.chatHistory = currentMessages.filter(msg => msg.role !== "tool");

            // Return the final response
            return finalResponse;

        } catch (e) {
            resolver.logError(e);
            const chatResponse = await callPathway('sys_generator_quick', {...args, model: styleModel}, resolver);
            resolver.tool = JSON.stringify({ search: false, title: args.title });
            return args.stream ? null : chatResponse;
        }
    }
};
@@ -132,7 +132,7 @@ const addToolCalls = (chatHistory, toolArgs, toolName, toolCallId = getUniqueId(
132
132
  const addToolResults = (chatHistory, result, toolCallId) => {
133
133
  const toolResult = {
134
134
  "role": "tool",
135
- "content": result,
135
+ "content": typeof result === 'string' ? result : JSON.stringify(result),
136
136
  "tool_call_id": toolCallId
137
137
  };
138
138
  chatHistory.push(toolResult);
@@ -13,7 +13,7 @@ export default {
13
13
  },
14
14
  {
15
15
  "role": "user",
16
- "content": "<MEMORY>\n{{{sectionMemory}}}\n</MEMORY>\n<CONVERSATION>\n{{{toJSON chatHistory}}}\n</CONVERSATION>\nAnalyze the current contents of this section of your memory and the conversation and return any information relevant for you to use in your response."
16
+ "content": "<MEMORY>\n{{{sectionMemory}}}\n</MEMORY>\n<CONVERSATION>\n{{{toJSON chatHistory}}}\n</CONVERSATION>\nAnalyze the current contents of this section of your memory and the conversation and return any information relevant for you to use in your response. Accuracy is critical. You must never make up or hallucinate information - if you don't see it in the memory, you must return 'No relevant information found.'"
17
17
  },
18
18
  ]
19
19
  }),
@@ -26,7 +26,7 @@ export default {
26
26
  section: "memoryAll",
27
27
  updateContext: false
28
28
  },
29
- model: 'oai-gpt4o',
29
+ model: 'oai-gpt41-mini',
30
30
  useInputChunking: false,
31
31
  enableDuplicateRequests: false,
32
32
  timeout: 300,