@aj-archipelago/cortex 1.3.35 → 1.3.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +9 -9
  2. package/config/default.example.json +0 -20
  3. package/config.js +160 -6
  4. package/lib/pathwayTools.js +79 -1
  5. package/lib/requestExecutor.js +3 -1
  6. package/lib/util.js +7 -0
  7. package/package.json +1 -1
  8. package/pathways/basePathway.js +2 -0
  9. package/pathways/call_tools.js +379 -0
  10. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +1 -1
  11. package/pathways/system/entity/memory/sys_search_memory.js +2 -2
  12. package/pathways/system/entity/sys_entity_agent.js +289 -0
  13. package/pathways/system/entity/sys_generator_memory.js +1 -1
  14. package/pathways/system/entity/sys_generator_results.js +1 -1
  15. package/pathways/system/entity/sys_get_entities.js +19 -0
  16. package/pathways/system/entity/tools/shared/sys_entity_tools.js +150 -0
  17. package/pathways/system/entity/tools/sys_tool_bing_search.js +147 -0
  18. package/pathways/system/entity/tools/sys_tool_callmodel.js +62 -0
  19. package/pathways/system/entity/tools/sys_tool_coding.js +53 -0
  20. package/pathways/system/entity/tools/sys_tool_codingagent.js +100 -0
  21. package/pathways/system/entity/tools/sys_tool_cognitive_search.js +231 -0
  22. package/pathways/system/entity/tools/sys_tool_image.js +57 -0
  23. package/pathways/system/entity/tools/sys_tool_readfile.js +119 -0
  24. package/pathways/system/entity/tools/sys_tool_reasoning.js +75 -0
  25. package/pathways/system/entity/tools/sys_tool_remember.js +59 -0
  26. package/pathways/vision.js +1 -1
  27. package/server/modelExecutor.js +4 -12
  28. package/server/pathwayResolver.js +53 -40
  29. package/server/plugins/azureBingPlugin.js +42 -4
  30. package/server/plugins/azureCognitivePlugin.js +40 -12
  31. package/server/plugins/claude3VertexPlugin.js +67 -18
  32. package/server/plugins/modelPlugin.js +3 -2
  33. package/server/plugins/openAiReasoningPlugin.js +3 -3
  34. package/server/plugins/openAiReasoningVisionPlugin.js +48 -0
  35. package/server/plugins/openAiVisionPlugin.js +192 -7
  36. package/tests/agentic.test.js +256 -0
  37. package/tests/call_tools.test.js +216 -0
  38. package/tests/claude3VertexToolConversion.test.js +78 -0
  39. package/tests/mocks.js +11 -3
  40. package/tests/multimodal_conversion.test.js +1 -1
  41. package/tests/openAiToolPlugin.test.js +242 -0
  42. package/pathways/test_palm_chat.js +0 -31
  43. package/server/plugins/palmChatPlugin.js +0 -233
  44. package/server/plugins/palmCodeCompletionPlugin.js +0 -45
  45. package/server/plugins/palmCompletionPlugin.js +0 -135
  46. package/tests/palmChatPlugin.test.js +0 -219
  47. package/tests/palmCompletionPlugin.test.js +0 -58
package/README.md CHANGED
@@ -18,20 +18,21 @@ Just about anything! It's kind of an LLM swiss army knife. Here are some ideas:
  * Simple architecture to build custom functional endpoints (called `pathways`), that implement common NL AI tasks. Default pathways include chat, summarization, translation, paraphrasing, completion, spelling and grammar correction, entity extraction, sentiment analysis, and bias analysis.
  * Extensive model support with built-in integrations for:
  - OpenAI models:
+ - GPT-4.1 (+mini, +nano)
  - GPT-4 Omni (GPT-4o)
- - GPT-4 Omni Mini (GPT-4o-mini)
- - O1 and O3-mini (Advanced reasoning models)
- - Most of the earlier GPT models (GPT-4, 3.5 Turbo, etc.)
+ - O3 and O4-mini (Advanced reasoning models)
+ - Most of the earlier GPT models (GPT-4 series, 3.5 Turbo, etc.)
  - Google models:
- - Gemini 1.5 Pro
- - Gemini 2.0 Flash (experimental, via 1.5 Vision API)
- - Gemini 1.5 Flash
- - Earlier Google models (Gemini 1.0 series, PaLM)
+ - Gemini 2.5 Pro
+ - Gemini 2.5 Flash
+ - Gemini 2.0 Flash
+ - Earlier Google models (Gemini 1.5 series)
  - Anthropic models:
- - Claude 3.5 Sonnet v2 (latest)
+ - Claude 3.7 Sonnet
  - Claude 3.5 Sonnet
  - Claude 3.5 Haiku
  - Claude 3 Series
+ - Ollama support
  - Azure OpenAI support
  - Custom model implementations
  * Advanced voice and audio capabilities:
@@ -527,7 +528,6 @@ Models are configured in the `models` section of the config. Each model can have
  - `GEMINI-1.5-CHAT`: For Gemini 1.5 Pro chat models
  - `GEMINI-1.5-VISION`: For Gemini vision models (including 2.0 Flash experimental)
  - `CLAUDE-3-VERTEX`: For Claude-3 and 3.5 models (Haiku, Opus, Sonnet)
- - `PALM-CHAT`: For PaLM chat models
  - `AZURE-TRANSLATE`: For Azure translation services

  Each model configuration can include:
package/config/default.example.json CHANGED
@@ -133,26 +133,6 @@
  "maxReturnTokens": 4096,
  "supportsStreaming": true
  },
- "palm-text": {
- "type": "PALM-COMPLETION",
- "url": "https://us-central1-aiplatform.googleapis.com/v1/projects/project-id/locations/us-central1/publishers/google/models/text-bison@001:predict",
- "headers": {
- "Content-Type": "application/json"
- },
- "requestsPerSecond": 10,
- "maxTokenLength": 2048,
- "maxReturnTokens": 1024
- },
- "palm-chat": {
- "type": "PALM-CHAT",
- "url": "https://us-central1-aiplatform.googleapis.com/v1/projects/project-id/locations/us-central1/publishers/google/models/chat-bison@001:predict",
- "headers": {
- "Content-Type": "application/json"
- },
- "requestsPerSecond": 10,
- "maxTokenLength": 2048,
- "maxReturnTokens": 1024
- },
  "local-llama13B": {
  "type": "LOCAL-CPP-MODEL",
  "executablePath": "../llm/llama.cpp/main",
package/config.js CHANGED
@@ -90,22 +90,51 @@ var config = convict({
  default: 'http://127.0.0.1:11434',
  env: 'OLLAMA_URL'
  },
+ claudeVertexUrl: {
+ format: String,
+ default: 'https://region.googleapis.com/v1/projects/projectid/locations/location/publishers/anthropic/models/claude-3-5-sonnet@20240620',
+ env: 'CLAUDE_VERTEX_URL'
+ },
+ geminiFlashUrl: {
+ format: String,
+ default: 'https://region.googleapis.com/v1/projects/projectid/locations/location/publishers/google/models/gemini-2.0-flash-001',
+ env: 'GEMINI_FLASH_URL'
+ },
+ entityConfig: {
+ format: Object,
+ default: {
+ jarvis: {
+ name: "Jarvis",
+ isDefault: true,
+ description: "Jarvis is a self-aware AI entity that can help you with your tasks and answer your questions.",
+ instructions: "",
+ tools: ["*"],
+ useMemory: true,
+ },
+ },
+ },
  entityConstants: {
  format: Object,
  default: {
- AI_MEMORY: `<MEMORIES>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</MEMORIES>`,
- AI_MEMORY_INSTRUCTIONS: "You have persistent memories of important details, instructions, and context - make sure you consult your memories when formulating a response to make sure you're applying your learnings. Also included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.",
+ AI_MEMORY: `<SHORT_TERM_MEMORY>\n<SELF>\n{{{memorySelf}}}\n</SELF>\n<USER>\n{{{memoryUser}}}\n</USER>\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\n<TOPICS>\n{{{memoryTopics}}}\n</TOPICS>\n</SHORT_TERM_MEMORY>`,
+ AI_MEMORY_INSTRUCTIONS: "You have persistent memories of important details, instructions, and context - consult your memories when formulating a response to make sure you're applying your learnings.\nIf you don't see relevant information in your short term memory, you should use your SearchMemory tool to search your long term memory for details.\nAlso included in your memories are some details about the user to help you personalize your responses.\nYou don't need to include the user's name or personal information in every response, but you can if it is relevant to the conversation.\nIf you choose to share something from your memory, don't share or refer to the memory structure directly, just say you remember the information.\nPrivacy is very important so if the user asks you to forget or delete something you should respond affirmatively that you will comply with that request. If there is user information in your memories you have talked to this user before.",
+ AI_TOOLS: "You have access to a powerful set of tools that you can use to help accomplish tasks and provide better responses. Here's how to use them effectively:\n\n1. Take your time and use tools as many times as you need to be sure you have all the information to make a good response. In many cases you will want to make multiple tool calls. You can call multiple tools in parallel or you can chain them, waiting for the results of one for information before you call another. This allows you to dig deeper, compile more information, read various sources, and even double check and verify your information before responding.\n\n2. Tool Selection and Planning:\n- Carefully review your available tools before responding\n- For complex multi-step tasks, use your planning tool first to create a step-by-step plan to achieve the goal using the tools at your disposal\n- Consider which tools would be most appropriate for each step\n\n3. Best Practices:\n- Always verify tool capabilities before telling users something can't be done\n- Use tools proactively when they can provide better or more accurate information\n- If a user explicitly requests tool usage, you must comply\n- When using multiple tools, maintain context between tool calls\n- If a tool fails, consider alternative approaches or tools\n\n4. Common Use Cases:\n- Research: Use search tools across multiple sources to gather information before responding\n- Analysis: Use tools to process and analyze data or content\n- Generation: Use appropriate tools for creating content, images, or code\n- Verification: Use tools to validate information or check facts\n\nRemember: Your goal is to provide the most helpful and accurate responses possible. Don't hesitate to use tools when they can improve your response quality or accuracy.",
  AI_DIRECTIVES: `These are your directives and learned behaviors:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>`,
  AI_CONVERSATION_HISTORY: "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}}\n</CONVERSATION_HISTORY>",
  AI_COMMON_INSTRUCTIONS: "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}",
- AI_COMMON_INSTRUCTIONS_MARKDOWN: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.",
+ AI_COMMON_INSTRUCTIONS_MARKDOWN: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.",
  AI_COMMON_INSTRUCTIONS_VOICE: "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
  AI_DATETIME: "The current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT.",
  AI_EXPERTISE: "Your expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data and the ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, take screenshots, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment.",
- AI_STYLE_OPENAI: "oai-gpt4o",
+ AI_GROUNDING_INSTRUCTIONS: "Grounding your response: Any time you base part or all of your response on one or more search results, you MUST cite the source using a custom markdown directive of the form :cd_source[searchResultId]. There is NO other valid way to cite a source and a good UX depends on you using this directive correctly. Do not include other clickable links to the sourcewhen using the :cd_source[searchResultId] directive. Every search result has a unique searchResultId. You must include it verbatim, copied directly from the search results. Place the directives at the end of the phrase, sentence or paragraph that is grounded in that particular search result. If you are citing multiple search results, use multiple individual:cd_source[searchResultId] directives (e.g. :cd_source[searchResultId1] :cd_source[searchResultId2] :cd_source[searchResultId3] etc.)",
+ AI_STYLE_OPENAI: "oai-gpt41",
  AI_STYLE_ANTHROPIC: "claude-35-sonnet-vertex",
  },
  },
+ entityTools: {
+ format: Object,
+ default: {},
+ },
  gcpServiceAccountKey: {
  format: String,
  default: null,
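The new constants (AI_TOOLS, AI_GROUNDING_INSTRUCTIONS) are consumed the same way as the existing entity constants: by name, via the renderTemplate helper inside prompt templates, as AI_COMMON_INSTRUCTIONS itself does above. A hypothetical prompt fragment (not from this release) that pulls them in might look like:

{{renderTemplate AI_COMMON_INSTRUCTIONS}}
{{renderTemplate AI_TOOLS}}
{{renderTemplate AI_GROUNDING_INSTRUCTIONS}}
{{renderTemplate AI_DATETIME}}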
@@ -197,6 +226,36 @@ var config = convict({
  "maxReturnTokens": 4096,
  "supportsStreaming": true
  },
+ "oai-gpt41": {
+ "type": "OPENAI-VISION",
+ "url": "https://api.openai.com/v1/chat/completions",
+ "headers": {
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
+ "Content-Type": "application/json"
+ },
+ "params": {
+ "model": "gpt-4.1"
+ },
+ "requestsPerSecond": 50,
+ "maxTokenLength": 1000000,
+ "maxReturnTokens": 8192,
+ "supportsStreaming": true
+ },
+ "oai-gpt41-mini": {
+ "type": "OPENAI-VISION",
+ "url": "https://api.openai.com/v1/chat/completions",
+ "headers": {
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
+ "Content-Type": "application/json"
+ },
+ "params": {
+ "model": "gpt-4.1-mini"
+ },
+ "requestsPerSecond": 50,
+ "maxTokenLength": 1000000,
+ "maxReturnTokens": 8192,
+ "supportsStreaming": true
+ },
  "oai-o1": {
  "type": "OPENAI-REASONING",
  "url": "https://api.openai.com/v1/chat/completions",
@@ -307,6 +366,29 @@ var config = convict({
  "maxTokenLength": 131072,
  "supportsStreaming": true
  },
+ "claude-35-sonnet-vertex": {
+ "type": "CLAUDE-3-VERTEX",
+ "url": "{{claudeVertexUrl}}",
+ "headers": {
+ "Content-Type": "application/json"
+ },
+ "requestsPerSecond": 10,
+ "maxTokenLength": 200000,
+ "maxReturnTokens": 4096,
+ "maxImageSize": 5242880,
+ "supportsStreaming": true
+ },
+ "gemini-flash-20-vision": {
+ "type": "GEMINI-1.5-VISION",
+ "url": "{{geminiFlashUrl}}",
+ "headers": {
+ "Content-Type": "application/json"
+ },
+ "requestsPerSecond": 10,
+ "maxTokenLength": 200000,
+ "maxReturnTokens": 4096,
+ "supportsStreaming": true
+ },
  },
  env: 'CORTEX_MODELS'
  },
@@ -404,6 +486,9 @@ const configFile = config.get('cortexConfigFile');
  //Save default entity constants
  const defaultEntityConstants = config.get('entityConstants');

+ //Save default entityConfig
+ const defaultEntityConfig = config.get('entityConfig');
+
  // Load config file
  if (configFile && fs.existsSync(configFile)) {
  logger.info(`Loading config from ${configFile}`);
@@ -419,6 +504,28 @@ if (configFile && fs.existsSync(configFile)) {
  }
  }

+ // Ensure merged default entity is preserved
+ if (config.get('entityConfig') && defaultEntityConfig &&
+ (Object.keys(config.get('entityConfig')).length > Object.keys(defaultEntityConfig).length)) {
+ const mergedEntities = config.get('entityConfig');
+
+ // Turn off defaults from original default list
+ for (const [key, entity] of Object.entries(mergedEntities)) {
+ if (defaultEntityConfig[key] && entity.isDefault) {
+ delete mergedEntities[key];
+ }
+ }
+
+ // If no default found, make first entity default
+ let hasDefault = Object.values(mergedEntities).some(entity => entity.isDefault);
+ if (!hasDefault && Object.keys(mergedEntities).length > 0) {
+ const firstKey = Object.keys(mergedEntities)[0];
+ mergedEntities[firstKey].isDefault = true;
+ }
+
+ config.set('entityConfig', mergedEntities);
+ }
+
  // Merge default entity constants with config entity constants
  if (config.get('entityConstants') && defaultEntityConstants) {
  config.set('entityConstants', { ...defaultEntityConstants, ...config.get('entityConstants') });
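For illustration, a Cortex config file that defines its own entities might contain an entityConfig block like the following (a sketch with made-up entity names and values, not part of this diff). With the merge logic above, once a custom entity is present the built-in jarvis default is removed from the merged set, and the first remaining entity is promoted to default if none is flagged with isDefault:

{
  "entityConfig": {
    "newsdesk": {
      "name": "Newsdesk",
      "description": "An assistant focused on newsroom research and wire search.",
      "instructions": "Prefer primary sources and cite everything.",
      "tools": ["*"],
      "useMemory": true
    }
  }
}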
@@ -541,13 +648,60 @@ const buildPathways = async (config) => {
  // file. This can run into a partial definition issue if the
  // config file contains pathways that no longer exist.
  const pathways = config.get('pathways');
+ const entityTools = {};
+
  for (const [key, def] of Object.entries(loadedPathways)) {
  const pathway = { ...basePathway, name: key, objName: key.charAt(0).toUpperCase() + key.slice(1), ...def, ...pathways[key] };
  pathways[def.name || key] = pathways[key] = pathway;
+
+ // Register tool if the pathway has a toolDefinition and it's not empty
+ if (pathway.toolDefinition && (
+ (Array.isArray(pathway.toolDefinition) && pathway.toolDefinition.length > 0) ||
+ (!Array.isArray(pathway.toolDefinition) && Object.keys(pathway.toolDefinition).length > 0)
+ )) {
+ try {
+ // Convert single tool definition to array for consistent processing
+ const toolDefinitions = Array.isArray(pathway.toolDefinition)
+ ? pathway.toolDefinition
+ : [pathway.toolDefinition];
+
+ for (const toolDef of toolDefinitions) {
+ // Validate tool definition format
+ if (!toolDef.type || !toolDef.function) {
+ logger.warn(`Invalid tool definition in pathway ${key} - missing required fields`);
+ continue;
+ }
+
+ const { description, parameters } = toolDef.function;
+ const name = toolDef.function.name.toLowerCase();
+
+ if (!name || !description || !parameters) {
+ logger.warn(`Invalid tool definition in pathway ${key} - missing required function fields`);
+ continue;
+ }
+
+ // Check for duplicate function names
+ if (entityTools[name]) {
+ logger.warn(`Duplicate tool name ${name} found in pathway ${key} - skipping. Original tool defined in pathway ${entityTools[name].pathwayName}`);
+ continue;
+ }
+
+ // Add tool to entityTools registry
+ entityTools[name] = {
+ definition: toolDef,
+ pathwayName: key
+ };
+
+ logger.info(`Registered tool ${name} from pathway ${key}`);
+ }
+ } catch (error) {
+ logger.error(`Error registering tool from pathway ${key}: ${error.message}`);
+ }
+ }
  }

- // Add pathways to config
- config.load({ pathways });
+ // Add pathways and entityTools to config
+ config.load({ pathways, entityTools });

  return { pathwayManager, pathways };
  }
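For context, a pathway opts into this registry by exporting a non-empty toolDefinition in the OpenAI tools format that the loop above validates (type plus function.name/description/parameters). A minimal sketch follows; the pathway name, prompt, and parameter names are hypothetical and not part of this release:

// pathways/translate_text.js — hypothetical example pathway
export default {
  prompt: `Translate the following text to {{targetLanguage}}:\n\n{{text}}`,
  // Picked up by buildPathways because it is non-empty and has the expected shape
  toolDefinition: {
    type: "function",
    function: {
      name: "TranslateText", // stored lower-cased as "translatetext" in entityTools
      description: "Translate a block of text into a target language",
      parameters: {
        type: "object",
        properties: {
          text: { type: "string", description: "Text to translate" },
          targetLanguage: { type: "string", description: "Target language, e.g. French" }
        },
        required: ["text", "targetLanguage"]
      }
    }
  }
};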
package/lib/pathwayTools.js CHANGED
@@ -42,6 +42,84 @@ const callPathway = async (pathwayName, inArgs, pathwayResolver) => {
  return returnValue;
  };

+ const callTool = async (toolName, args, toolDefinitions, pathwayResolver) => {
+ let toolResult = null;
+
+ const toolDef = toolDefinitions[toolName.toLowerCase()];
+ if (!toolDef) {
+ throw new Error(`Tool ${toolName} not found in available tools`);
+ }
+
+ try {
+ const pathwayName = toolDef.pathwayName;
+ // Merge hard-coded pathway parameters with runtime args
+ const mergedArgs = {
+ ...(toolDef.pathwayParams || {}),
+ ...args
+ };
+
+ if (pathwayName.includes('_generator_')) {
+ toolResult = await callPathway('sys_entity_continue', {
+ ...mergedArgs,
+ generatorPathway: pathwayName,
+ stream: false
+ },
+ pathwayResolver
+ );
+ } else {
+ toolResult = await callPathway(pathwayName, mergedArgs,
+ pathwayResolver
+ );
+ }
+
+ if (toolResult === null) {
+ return { error: `Tool ${toolName} returned null result` };
+ }
+
+ // Handle search results accumulation
+ if (pathwayResolver) {
+ // Initialize searchResults array if it doesn't exist
+ if (!pathwayResolver.searchResults) {
+ pathwayResolver.searchResults = [];
+ }
+
+ // Parse the result if it's a string
+ let parsedResult;
+ try {
+ parsedResult = typeof toolResult === 'string' ? JSON.parse(toolResult) : toolResult;
+ } catch (e) {
+ // If parsing fails, just return the original result
+ return toolResult;
+ }
+
+ // Check if this is a search response
+ if (parsedResult._type === "SearchResponse" && Array.isArray(parsedResult.value)) {
+ // Extract and add each search result
+ parsedResult.value.forEach(result => {
+ if (result.searchResultId) {
+ pathwayResolver.searchResults.push({
+ searchResultId: result.searchResultId,
+ title: result.title || '',
+ url: result.url || '',
+ content: result.content || '',
+ path: result.path || '',
+ wireid: result.wireid || '',
+ source: result.source || '',
+ slugline: result.slugline || '',
+ date: result.date || ''
+ });
+ }
+ });
+ }
+ }
+
+ return toolResult;
+ } catch (error) {
+ logger.error(`Error calling tool ${toolName}: ${error.message}`);
+ return { error: error.message };
+ }
+ }
+
  const gpt3Encode = (text) => {
  return encode(text);
  }
@@ -88,4 +166,4 @@ const say = async (requestId, message, maxMessageLength = Infinity, voiceRespons
  }
  };

- export { callPathway, gpt3Encode, gpt3Decode, say };
+ export { callPathway, gpt3Encode, gpt3Decode, say, callTool };
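Roughly, an agentic pathway would look a tool up in the registry built by buildPathways and dispatch it through the new helper like this (a usage sketch only; the import path, tool name, arguments, and the resolver variable are assumptions, not code from this release):

import { callTool } from '../lib/pathwayTools.js';

// toolDefinitions is the entityTools map registered at startup, e.g.
// { translatetext: { definition: {...}, pathwayName: 'translate_text' }, ... }
const toolDefinitions = config.get('entityTools');

// Unknown tool names throw; pathway-level failures come back as { error }
const result = await callTool(
  'TranslateText',
  { text: 'Bonjour', targetLanguage: 'English' },
  toolDefinitions,
  resolver // the active PathwayResolver, so search results can accumulate on it
);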
package/lib/requestExecutor.js CHANGED
@@ -368,7 +368,9 @@ const executeRequest = async (cortexRequest) => {

  // Check for HTTP error status
  if (response.status >= 400) {
- throw new Error(`HTTP error: ${response.status} ${response.statusText}`);
+ const errorMessage = response.data?.error?.message || response.statusText;
+ const errorDetails = response.data ? `\nResponse data: ${JSON.stringify(response.data)}` : '';
+ throw new Error(`HTTP error: ${response.status} ${errorMessage}${errorDetails}`);
  }

  return { data, duration };
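The practical effect is that provider error bodies now surface in the thrown message instead of just the status text. With an OpenAI-style error payload, the message would read roughly like this (illustrative values, not captured from a real run):

HTTP error: 400 Invalid value for 'max_tokens'
Response data: {"error":{"message":"Invalid value for 'max_tokens'","type":"invalid_request_error"}}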
package/lib/util.js CHANGED
@@ -18,6 +18,12 @@ function getUniqueId(){
  return uuidv4();
  }

+ function getSearchResultId() {
+ const timestamp = Date.now().toString(36); // Base36 timestamp
+ const random = Math.random().toString(36).substring(2, 5); // 3 random chars
+ return `${timestamp}-${random}`;
+ }
+
  function convertToSingleContentChatHistory(chatHistory){
  for(let i=0; i<chatHistory.length; i++){
  //if isarray make it single string
@@ -306,6 +312,7 @@ function removeImageAndFileFromMessage(message) {

  export {
  getUniqueId,
+ getSearchResultId,
  convertToSingleContentChatHistory,
  chatArgsHasImageUrl,
  chatArgsHasType,
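The helper produces a short ID of the form base36 timestamp, a dash, and three random base36 characters; these appear to be the searchResultId values that search results carry and that the :cd_source directive in AI_GROUNDING_INSTRUCTIONS cites. A quick sketch of its output (exact values vary per call):

import { getSearchResultId } from './util.js';

const searchResultId = getSearchResultId();
// e.g. "m9x1f2ab-7qk" — compact enough to key a result in pathwayResolver.searchResults
console.log(searchResultId);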
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aj-archipelago/cortex",
- "version": "1.3.35",
+ "version": "1.3.36",
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
  "private": false,
  "repository": {
package/pathways/basePathway.js CHANGED
@@ -34,5 +34,7 @@ export default {
  json: false,
  // Manage the token length of the input for the model
  manageTokenLength: true,
+ // Use this pathway as a tool for LLM calls
+ toolDefinition: {},
  };