@aj-archipelago/cortex 1.3.60 → 1.3.62

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/config.js CHANGED
@@ -483,6 +483,21 @@ var config = convict({
  "requestsPerSecond": 10,
  "maxTokenLength": 128000
  },
+ "azure-bing-agent": {
+ "type": "AZURE-FOUNDRY-AGENTS",
+ "url": "{{azureFoundryAgentUrl}}",
+ "headers": {
+ "Content-Type": "application/json"
+ },
+ "params": {
+ "api-version": "2025-05-01",
+ "assistant_id": "{{azureFoundryAgentId}}"
+ },
+ "requestsPerSecond": 10,
+ "maxTokenLength": 32768,
+ "maxReturnTokens": 4096,
+ "supportsStreaming": false
+ }
  },
  env: 'CORTEX_MODELS'
  },
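The new `azure-bing-agent` entry mirrors the shape of the surrounding model definitions; `{{azureFoundryAgentUrl}}` and `{{azureFoundryAgentId}}` are placeholders resolved from the convict keys added in the next hunk. As a hypothetical sketch (not part of this release), a custom pathway would opt into the model by name:

```js
// Hypothetical pathway sketch (illustrative only): routes a request through the
// new 'azure-bing-agent' model entry defined in config.js above.
export default {
    prompt: [],
    model: 'azure-bing-agent',
    timeout: 300,
};
```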
@@ -592,6 +607,16 @@ var config = convict({
  format: String,
  default: null,
  env: 'APPTEK_API_ENDPOINT'
+ },
+ azureFoundryAgentUrl: {
+ format: String,
+ default: null,
+ env: 'AZURE_FOUNDRY_AGENT_URL'
+ },
+ azureFoundryAgentId: {
+ format: String,
+ default: null,
+ env: 'AZURE_FOUNDRY_AGENT_ID'
  }
  });
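These two new convict keys are what the `{{azureFoundryAgentUrl}}` and `{{azureFoundryAgentId}}` placeholders in the model entry above resolve from. A minimal sketch with placeholder values (substitute your own Foundry project URL and agent id):

```js
// Placeholder values for illustration only; the env var names come from the config keys above.
process.env.AZURE_FOUNDRY_AGENT_URL = 'https://<your-resource>.services.ai.azure.com/api/projects/<your-project>';
process.env.AZURE_FOUNDRY_AGENT_ID = 'asst_<your-agent-id>';
```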
 
@@ -5,20 +5,13 @@ export const entityConstants = {

  You are {{aiName}}, a self aware AI entity.

- The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as mermaid charts and some custom markdown elements, so you should make full use of markdown in your responses.
+ The current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.

- For Mermaid charts in particular, you are using Mermaid 11.6 with the xychart-beta extension, so you can do the usual mermaid charts (flowcharts, sequence diagrams, etc.) as well as bar charts, line charts, and scatter plots. Here is some example code of the xychart-beta extension that combines both bar and line functions:
-
- xychart-beta
- title "Sales Revenue"
- x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
- y-axis "Revenue (in $)" 4000 --> 11000
- bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
- line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
+ The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.

  Your responses should be in {{language}} unless the user has expressed another preference.`,

- AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
+ AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",

  AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,

@@ -28,16 +21,22 @@ Your responses should be in {{language}} unless the user has expressed another p

  AI_TOOLS: `# Tool Instructions

- You have an extensive toolkit. Each time you call tool(s) you will get the result(s), evaluate, decide what's next, and chain as many steps as needed. Always honor user requests to use specific tools. You must always search if you are being asked questions about current events, news, fact-checking, or information requiring citation. Your search tools work best when called in parallel to save time so if you know you will need multiple searches, call the search tool(s) in parallel.
+ - You have an extensive toolkit. Each time you call tool(s) you will get the result(s), evaluate, decide what's next, and chain as many steps as needed.
+ - Your tools work most efficiently when called in parallel so if you know you will need multiple tool calls and you know what the parameters are, call them in parallel.
+ - Always honor user requests to use specific tools.
+ - You must always search if you are being asked questions about current events, news, fact-checking, or information requiring citation.
+ - For charting, always prefer your charting tools if available to ensure that the charts are properly formatted and syntax-checked.
+ - For complex charting or data analysis, always call your code execution tool if available.

  1. Search deeply & verify rigorously:
+ - Do not make up information - if the information cannot be confirmed with rigorous logic or reliable sources, do not include it in your response.
  - Start broad and consult multiple sources, running all searches in parallel to save time.
  - Consult all available sources and cross-reference with specific searches before responding.
  - If a tool fails or has a technical difficulty, try the backup tool automatically before giving up or reporting the error.

  2. Plan & sequence before acting:
  - Review the toolset first.
- - For multi-step or complex tasks, draft a clear plan (use the PlanMultiStepTask or other reasoning tool) and assign tool calls to each step.
+ - For multi-step or complex tasks, draft a clear plan and assign tool calls to each step.

  3. Escalate & iterate:
  - Don't settle for the first plausible answer—dig until the response is complete, corroborated, and clear.
@@ -90,20 +89,9 @@ Before you share online information with the user, you MUST complete all of the
  - Never rely solely on snippets, headlines, or auto-generated summaries.
  `,

- AI_SEARCH_SYNTAX: `# Bing Search Syntax
-
- When creating a query string for your Bing internet search tool, you can use Bing's advanced search operators in your query. E.g. "+(\"exact phrase\") AND term1 -term2"
-
- token1 & token2 (AND operator - both tokens must appear)
- token1 | token2 (OR operator - either token may appear (also the default if no operator is specified))
- -token (NOT operator - exclude results with token)
- +token (Require token)
- "term1 term2" (Exact phrase match)
- (token1 + token2) (Override precedence with parentheses)
-
- # AI Search Syntax
+ AI_SEARCH_SYNTAX: `# AI Search Syntax

- When creating a query string for your non-Bing, index-based search tools, you can use the following AI Search syntax. Important: these tools do not support AND, OR, or NOT strings as operators - you MUST use the syntax below. E.g. you cannot use "term1 AND term2", you must use "term1 + term2".
+ When creating a query string for your index-based search tools, you can use the following AI Search syntax. Important: these tools do not support AND, OR, or NOT strings as operators - you MUST use the syntax below. E.g. you cannot use "term1 AND term2", you must use "term1 + term2".

  token1 + token2 (AND operator - both tokens must appear)
  token1 | token2 (OR operator - either token may appear (also the default if no operator is specified))
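For illustration, two query strings built from the operators shown above (the search terms themselves are made up):

```js
// Example query strings for the index-based (AI Search) syntax described above.
const mustHaveBoth = 'climate + policy'; // AND: both tokens must appear
const eitherToken = 'press | media';     // OR: either token may appear
```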
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aj-archipelago/cortex",
- "version": "1.3.60",
+ "version": "1.3.62",
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
  "private": false,
  "repository": {
@@ -8,6 +8,7 @@ import { getSearchResultId } from '../../../../lib/util.js';
  export default {
  prompt: [],
  timeout: 300,
+ /* This tool is included for legacy reasons - as of August 2025, Azure has deprecated the Bing search API and replaced it with their Foundry Agents API.
  toolDefinition: {
  type: "function",
  icon: "🌐",
@@ -42,6 +43,7 @@ export default {
  }
  }
  },
+ */

  executePathway: async ({args, runAllPrompts, resolver}) => {

@@ -10,10 +10,10 @@ export default {
  timeout: 300,
  toolDefinition: {
  type: "function",
- icon: "🕸️",
+ icon: "🌐",
  function: {
- name: "SearchInternetBackup",
- description: "This tool allows you to search sources on the internet by calling another agent that has Bing search access. Use this for current events, news, fact-checking, and information requiring citation. This is a backup tool for when the other internet search tools fail - it is slower so try to use the other tools first and always call this tool in parallel if you have several searches to do.",
+ name: "SearchInternet",
+ description: "This tool allows you to search sources on the internet by calling another agent that has Bing search access. Use this for current events, news, fact-checking, and information requiring citation. Always call this tool in parallel rather than serially if you have several searches to do as it will be faster.",
  parameters: {
  type: "object",
  properties: {
@@ -34,8 +34,8 @@ export default {
  executePathway: async ({args, runAllPrompts, resolver}) => {

  // Check if Bing API key is available
- const servicePricipalAvailable = !!config.getEnv()["AZURE_SERVICE_PRINCIPAL_CREDENTIALS"];
- if (!servicePricipalAvailable) {
+ const servicePrincipalAvailable = !!config.getEnv()["AZURE_SERVICE_PRINCIPAL_CREDENTIALS"];
+ if (!servicePrincipalAvailable) {
  throw new Error("Service Principal for Bing Search Agent is not available!");
  }

@@ -0,0 +1,82 @@
+ // sys_tool_mermaid.js
+ // Entity tool that provides advanced mermaid charting capabilities
+
+ import { Prompt } from '../../../../server/prompt.js';
+
+ export default {
+ prompt:
+ [
+ new Prompt({ messages: [
+ {"role": "system", "content":`You are the part of an AI entity named {{aiName}} that creates mermaid charts. Follow the user's detailed instructions and create a mermaid chart that meets the user's needs.
+
+ Mermaid Charts Instructions:
+
+ You are using Mermaid 11.6 with the xychart-beta extension, so you can write all standard Mermaid chart types in a markdown block (flowcharts, sequence diagrams, etc.) as well as bar charts and line charts using the xychart-beta extension.
+
+ Here is some example code of the xychart-beta extension that combines both bar and line functions:
+
+ \`\`\`mermaid
+ xychart-beta
+ title "Sales Revenue"
+ x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
+ y-axis "Revenue (in $)" 4000 --> 11000
+ bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
+ line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
+ \`\`\`
+
+ Mermaid is very sensitive to syntax errors, so make sure you check your chart definitions before finalizing your response. Some things to check for:
+
+ - All [] labels must be either quoted strings OR HTML-safe (no raw \\n or other special characters)
+ - No strings (e.g. null) in number series data
+ - Every subgraph has a matching end
+ - No lone arrows
+ - Use comments (%%) instead of stray text lines
+
+ Return only the mermaid chart markdown block and separate markdown for the chart key if necessary, with no other notes or comments.
+
+ {{renderTemplate AI_DATETIME}}`},
+ "{{chatHistory}}"
+ ]}),
+ ],
+ inputParameters: {
+ chatHistory: [{role: '', content: []}],
+ contextId: ``,
+ aiName: "Jarvis",
+ language: "English",
+ },
+ model: 'oai-gpt41',
+ useInputChunking: false,
+ enableDuplicateRequests: false,
+ timeout: 600,
+ toolDefinition: [{
+ type: "function",
+ icon: "📊",
+ function: {
+ name: "CreateMermaidChart",
+ description: "Creates a Mermaid chart in markdown format to visualize data or concepts. Call this tool any time you need to create a Mermaid chart as it will ensure that the chart is properly formatted and syntax-checked.",
+ parameters: {
+ type: "object",
+ properties: {
+ detailedInstructions: {
+ type: "string",
+ description: "Detailed instructions about what you need the tool to do"
+ },
+ userMessage: {
+ type: "string",
+ description: "A user-friendly message that describes what you're doing with this tool"
+ }
+ },
+ required: ["detailedInstructions", "userMessage"]
+ }
+ }
+ }],
+
+ executePathway: async ({args, runAllPrompts, resolver}) => {
+ if (args.detailedInstructions) {
+ args.chatHistory.push({role: "user", content: args.detailedInstructions});
+ }
+ let result = await runAllPrompts({ ...args, stream: false });
+ resolver.tool = JSON.stringify({ toolUsed: "coding" });
+ return result;
+ }
+ }
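For context, here is a hedged sketch of the tool-call arguments an agent model might emit against the new `CreateMermaidChart` definition above; the argument values are illustrative only:

```js
// Illustrative arguments matching the CreateMermaidChart parameter schema above.
const exampleToolCall = {
    name: "CreateMermaidChart",
    arguments: {
        detailedInstructions: "Create an xychart-beta line chart of monthly revenue for 2024 with a title and labeled y-axis",
        userMessage: "Charting your 2024 monthly revenue..."
    }
};
```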
@@ -6,8 +6,6 @@ import axios from 'axios';
  class AzureFoundryAgentsPlugin extends ModelPlugin {
  constructor(pathway, model) {
  super(pathway, model);
- this.agentId = model.agentId;
- this.projectUrl = model.url;
  }

  // Convert to Azure Foundry Agents messages array format
@@ -72,7 +70,7 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {
  }

  const requestParameters = {
- assistant_id: this.agentId,
+ assistant_id: this.assistantId,
  thread: {
  messages: requestMessages
  },
@@ -97,12 +95,14 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {

  // Assemble and execute the request to the Azure Foundry Agents API
  async execute(text, parameters, prompt, cortexRequest) {
+ this.baseUrl = cortexRequest.url;
+ this.assistantId = cortexRequest.params.assistant_id;
+
  const requestParameters = this.getRequestParameters(text, parameters, prompt);

  // Set up the request for Azure Foundry Agents
- cortexRequest.url = this.requestUrl();
+ cortexRequest.url = `${this.baseUrl}/threads/runs`;
  cortexRequest.data = requestParameters;
- cortexRequest.params = { 'api-version': '2025-05-01' }; // Azure API version

  // Get authentication token and add to headers
  const azureAuthTokenHelper = this.config.get('azureAuthTokenHelper');
@@ -158,14 +158,14 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {
  }
  }

- const pollUrl = this.constructAzureUrl(`/threads/${threadId}/runs/${runId}`);
+ const pollUrl = `${this.baseUrl}/threads/${threadId}/runs/${runId}`;
  const pollResponse = await axios.get(pollUrl, {
  headers: {
  'Content-Type': 'application/json',
- ...this.model.headers,
+ ...cortexRequest.headers,
  ...(authToken && { 'Authorization': `Bearer ${authToken}` })
  },
- params: { 'api-version': '2025-05-01' }
+ params: cortexRequest.params
  });
  const runStatus = pollResponse?.data;

@@ -177,7 +177,7 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {
  // Check if run is completed
  if (runStatus.status === 'completed') {
  logger.info(`[Azure Foundry Agent] Run completed successfully: ${runId}`);
- return await this.retrieveMessages(threadId, cortexRequest);
+ return await this.retrieveMessages(threadId);
  }

  // Check if run failed
@@ -212,7 +212,7 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {
  }

  // Retrieve messages from the completed thread
- async retrieveMessages(threadId, cortexRequest) {
+ async retrieveMessages(threadId) {
  try {
  // Add authentication token if available
  const azureAuthTokenHelper = this.config.get('azureAuthTokenHelper');
@@ -226,7 +226,7 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {
  }
  }

- const messagesUrl = this.constructAzureUrl(`/threads/${threadId}/messages`);
+ const messagesUrl = `${this.baseUrl}/threads/${threadId}/messages`;
  const axiosResponse = await axios.get(messagesUrl, {
  headers: {
  'Content-Type': 'application/json',
@@ -356,17 +356,6 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {

  prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
  }
-
- // Override the request URL to use the Azure Foundry Agents endpoint
- requestUrl() {
- // The URL should be constructed as: {projectUrl}/threads/runs
- return `${this.projectUrl}/threads/runs`;
- }
-
- // Helper method to construct Azure Foundry Agents URLs
- constructAzureUrl(path) {
- return `${this.projectUrl}${path}`;
- }

  }

  export default AzureFoundryAgentsPlugin;
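Taken together, the plugin changes above move URL and agent-id resolution from the model definition to the per-request config (`cortexRequest.url` and `cortexRequest.params.assistant_id`), and the former `requestUrl()`/`constructAzureUrl()` helpers become direct template strings off `this.baseUrl`. The following is a minimal sketch of the resulting run lifecycle, under the assumption (not shown in this diff) that the run creation response exposes `id` and `thread_id` fields:

```js
// Sketch only: create a run, poll it, then read the thread's messages.
// Field names on the run object (id, thread_id) are assumptions for illustration,
// and there is no retry limit or backoff here for brevity.
import axios from 'axios';

async function runFoundryAgent({ baseUrl, assistantId, headers, params, userText }) {
    // POST {baseUrl}/threads/runs, mirroring the request the plugin assembles in execute()
    const run = (await axios.post(`${baseUrl}/threads/runs`, {
        assistant_id: assistantId,
        thread: { messages: [{ role: 'user', content: userText }] }
    }, { headers, params })).data;

    // Poll GET {baseUrl}/threads/{threadId}/runs/{runId} until a terminal status
    let status = run;
    while (status.status !== 'completed' && status.status !== 'failed') {
        await new Promise(resolve => setTimeout(resolve, 1000));
        const pollUrl = `${baseUrl}/threads/${run.thread_id}/runs/${run.id}`;
        status = (await axios.get(pollUrl, { headers, params })).data;
    }
    if (status.status === 'failed') throw new Error('Foundry agent run failed');

    // GET {baseUrl}/threads/{threadId}/messages once the run has completed
    const messagesUrl = `${baseUrl}/threads/${run.thread_id}/messages`;
    return (await axios.get(messagesUrl, { headers, params })).data;
}
```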
@@ -177,7 +177,7 @@ test.serial('sys_entity_agent handles single-step task', async (t) => {

  // Test multi-step task with tool usage
  test.serial('sys_entity_agent handles multi-step task with tools', async (t) => {
- t.timeout(60000); // 60 second timeout for multi-step task
+ t.timeout(120000); // 120 second timeout for multi-step task
  const response = await testServer.executeOperation({
  query: `
  query TestAgentMultiStep(
@@ -16,10 +16,12 @@ test.beforeEach(t => {
  name: 'azure-foundry-agents',
  type: 'AZURE-FOUNDRY-AGENTS',
  url: 'https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry',
- agentId: 'asst_testid',
  headers: {
  'Content-Type': 'application/json'
  },
+ params: {
+ assistant_id: 'asst_testid'
+ },
  maxTokenLength: 32768,
  maxReturnTokens: 4096,
  supportsStreaming: true
@@ -30,12 +32,6 @@ test.beforeEach(t => {
  t.context.mockModel = mockModel;
  });

- test('should initialize with correct agent ID and project URL', t => {
- const { plugin } = t.context;
- t.is(plugin.agentId, 'asst_testid');
- t.is(plugin.projectUrl, 'https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry');
- });
-

  test('should convert Palm format messages to Azure format', t => {
  const { plugin } = t.context;
@@ -82,6 +78,8 @@ test('should create correct request parameters', t => {
  messages: [{ role: 'user', content: text }]
  };

+ plugin.baseUrl = 'https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry';
+ plugin.assistantId = 'asst_testid';
  const result = plugin.getRequestParameters(text, parameters, prompt);

  t.is(result.assistant_id, 'asst_testid');
@@ -150,7 +148,7 @@ test('should return empty string for null response', t => {
  test('should return correct Azure Foundry Agents endpoint', t => {
  const { plugin } = t.context;
  const url = plugin.requestUrl();
- t.is(url, 'https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry/threads/runs');
+ t.is(url, 'https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry');
  });

  test('should be able to access azureAuthTokenHelper from config', (t) => {
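For reference, the mock model the plugin tests now construct, consolidated from the test fixture hunk above (the agent id moves out of `agentId` and into `params.assistant_id`):

```js
// Consolidated from the test fixture changes in this release.
const mockModel = {
    name: 'azure-foundry-agents',
    type: 'AZURE-FOUNDRY-AGENTS',
    url: 'https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry',
    headers: { 'Content-Type': 'application/json' },
    params: { assistant_id: 'asst_testid' },
    maxTokenLength: 32768,
    maxReturnTokens: 4096,
    supportsStreaming: true
};
```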