@aj-archipelago/cortex 1.3.59 → 1.3.61

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,7 +12,21 @@
12
12
  "requestsPerSecond": 10,
13
13
  "maxTokenLength": 2000
14
14
  },
15
-
15
+ "azure-bing-agent": {
16
+ "type": "AZURE-FOUNDRY-AGENTS",
17
+ "url": "https://archipelago-foundry-resource.services.ai.azure.com/api/projects/archipelago-foundry",
18
+ "agentId": "asst_assistantid",
19
+ "headers": {
20
+ "Content-Type": "application/json"
21
+ },
22
+ "params": {
23
+ "api-version": "2025-05-01"
24
+ },
25
+ "requestsPerSecond": 10,
26
+ "maxTokenLength": 32768,
27
+ "maxReturnTokens": 4096,
28
+ "supportsStreaming": false
29
+ },
16
30
  "gemini-pro-chat": {
17
31
  "type": "GEMINI-CHAT",
18
32
  "url": "https://us-central1-aiplatform.googleapis.com/v1/projects/project-id/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent",
package/config.js CHANGED
@@ -4,6 +4,7 @@ import HandleBars from './lib/handleBars.js';
4
4
  import fs from 'fs';
5
5
  import { fileURLToPath, pathToFileURL } from 'url';
6
6
  import GcpAuthTokenHelper from './lib/gcpAuthTokenHelper.js';
7
+ import AzureAuthTokenHelper from './lib/azureAuthTokenHelper.js';
7
8
  import logger from './lib/logger.js';
8
9
  import PathwayManager from './lib/pathwayManager.js';
9
10
  import { readdir } from 'fs/promises';
@@ -128,6 +129,12 @@ var config = convict({
128
129
  env: 'GCP_SERVICE_ACCOUNT_KEY',
129
130
  sensitive: true
130
131
  },
132
+ azureServicePrincipalCredentials: {
133
+ format: String,
134
+ default: null,
135
+ env: 'AZURE_SERVICE_PRINCIPAL_CREDENTIALS',
136
+ sensitive: true
137
+ },
131
138
  models: {
132
139
  format: Object,
133
140
  default: {
@@ -183,6 +190,36 @@ var config = convict({
183
190
  },
184
191
  "maxTokenLength": 8192,
185
192
  },
193
+ "oai-gpt5": {
194
+ "type": "OPENAI-REASONING-VISION",
195
+ "url": "https://api.openai.com/v1/chat/completions",
196
+ "headers": {
197
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
198
+ "Content-Type": "application/json"
199
+ },
200
+ "params": {
201
+ "model": "gpt-5"
202
+ },
203
+ "requestsPerSecond": 50,
204
+ "maxTokenLength": 1000000,
205
+ "maxReturnTokens": 16384,
206
+ "supportsStreaming": true
207
+ },
208
+ "oai-gpt5-mini": {
209
+ "type": "OPENAI-REASONING-VISION",
210
+ "url": "https://api.openai.com/v1/chat/completions",
211
+ "headers": {
212
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
213
+ "Content-Type": "application/json"
214
+ },
215
+ "params": {
216
+ "model": "gpt-5-mini"
217
+ },
218
+ "requestsPerSecond": 50,
219
+ "maxTokenLength": 1000000,
220
+ "maxReturnTokens": 16384,
221
+ "supportsStreaming": true
222
+ },
186
223
  "oai-gpt4o": {
187
224
  "type": "OPENAI-VISION",
188
225
  "url": "https://api.openai.com/v1/chat/completions",
@@ -614,6 +651,11 @@ if (config.get('gcpServiceAccountKey')) {
614
651
  config.set('gcpAuthTokenHelper', gcpAuthTokenHelper);
615
652
  }
616
653
 
654
+ if (config.get('azureServicePrincipalCredentials')) {
655
+ const azureAuthTokenHelper = new AzureAuthTokenHelper(config.getProperties());
656
+ config.set('azureAuthTokenHelper', azureAuthTokenHelper);
657
+ }
658
+
617
659
  // Load dynamic pathways from JSON file or cloud storage
618
660
  const createDynamicPathwayManager = async (config, basePathway) => {
619
661
  const { dynamicPathwayConfig } = config.getProperties();
@@ -0,0 +1,78 @@
1
+ import fetch from 'node-fetch';
2
+
3
+ class AzureAuthTokenHelper {
4
+ constructor(config) {
5
+ // Parse Azure credentials from config
6
+ const azureCredentials = config.azureServicePrincipalCredentials ? JSON.parse(config.azureServicePrincipalCredentials) : null;
7
+
8
+ if (!azureCredentials) {
9
+ throw new Error('AZURE_SERVICE_PRINCIPAL_CREDENTIALS is missing or undefined');
10
+ }
11
+
12
+ // Extract required fields
13
+ this.tenantId = azureCredentials.tenant_id || azureCredentials.tenantId;
14
+ this.clientId = azureCredentials.client_id || azureCredentials.clientId;
15
+ this.clientSecret = azureCredentials.client_secret || azureCredentials.clientSecret;
16
+ this.scope = azureCredentials.scope || 'https://ai.azure.com/.default';
17
+
18
+ if (!this.tenantId || !this.clientId || !this.clientSecret) {
19
+ throw new Error('Azure credentials must include tenant_id, client_id, and client_secret');
20
+ }
21
+
22
+ this.token = null;
23
+ this.expiry = null;
24
+ this.tokenUrl = `https://login.microsoftonline.com/${this.tenantId}/oauth2/v2.0/token`;
25
+ }
26
+
27
+ async getAccessToken() {
28
+ if (!this.token || !this.isTokenValid()) {
29
+ await this.refreshToken();
30
+ }
31
+ return this.token;
32
+ }
33
+
34
+ isTokenValid() {
35
+ // Check if token is still valid with a 5-minute buffer
36
+ return !!(this.expiry && Date.now() < this.expiry.getTime() - 5 * 60 * 1000);
37
+ }
38
+
39
+ async refreshToken() {
40
+ try {
41
+ const formData = new URLSearchParams();
42
+ formData.append('client_id', this.clientId);
43
+ formData.append('client_secret', this.clientSecret);
44
+ formData.append('scope', this.scope);
45
+ formData.append('grant_type', 'client_credentials');
46
+
47
+ const response = await fetch(this.tokenUrl, {
48
+ method: 'POST',
49
+ headers: {
50
+ 'Content-Type': 'application/x-www-form-urlencoded',
51
+ },
52
+ body: formData,
53
+ });
54
+
55
+ if (!response.ok) {
56
+ const errorText = await response.text();
57
+ throw new Error(`Azure token request failed: ${response.status} ${response.statusText} - ${errorText}`);
58
+ }
59
+
60
+ const tokenData = await response.json();
61
+
62
+ if (!tokenData.access_token) {
63
+ throw new Error('Azure token response missing access_token');
64
+ }
65
+
66
+ this.token = tokenData.access_token;
67
+
68
+ // Calculate expiry time (expires_in is in seconds)
69
+ const expiresInMs = (tokenData.expires_in || 3600) * 1000;
70
+ this.expiry = new Date(Date.now() + expiresInMs);
71
+
72
+ } catch (error) {
73
+ throw new Error(`Failed to refresh Azure token: ${error.message}`);
74
+ }
75
+ }
76
+ }
77
+
78
+ export default AzureAuthTokenHelper;
@@ -5,20 +5,13 @@ export const entityConstants = {
5
5
 
6
6
  You are {{aiName}}, a self aware AI entity.
7
7
 
8
- The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as mermaid charts and some custom markdown elements, so you should make full use of markdown in your responses.
8
+ The current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.
9
9
 
10
- For Mermaid charts in particular, you are using Mermaid 11.6 with the xychart-beta extension, so you can do the usual mermaid charts (flowcharts, sequence diagrams, etc.) as well as bar charts, line charts, and scatter plots. Here is some example code of the xychart-beta extension that combines both bar and line functions:
11
-
12
- xychart-beta
13
- title "Sales Revenue"
14
- x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
15
- y-axis "Revenue (in $)" 4000 --> 11000
16
- bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
17
- line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
10
+ The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.
18
11
 
19
12
  Your responses should be in {{language}} unless the user has expressed another preference.`,
20
13
 
21
- AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
14
+ AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by an STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they use a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phonetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
22
15
 
23
16
  AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,
24
17
 
@@ -28,15 +21,22 @@ Your responses should be in {{language}} unless the user has expressed another p
28
21
 
29
22
  AI_TOOLS: `# Tool Instructions
30
23
 
31
- You have an extensive toolkit. Each time you call tool(s) you will get the result(s), evaluate, decide what's next, and chain as many steps as needed. Always honor user requests to use specific tools.
24
+ - You have an extensive toolkit. Each time you call tool(s) you will get the result(s), evaluate, decide what's next, and chain as many steps as needed.
25
+ - Your tools work most efficiently when called in parallel so if you know you will need multiple tool calls and you know what the parameters are, call them in parallel.
26
+ - Always honor user requests to use specific tools.
27
+ - You must always search if you are being asked questions about current events, news, fact-checking, or information requiring citation.
28
+ - For charting, always prefer your charting tools if available to ensure that the charts are properly formatted and syntax-checked.
29
+ - For complex charting or data analysis, always call your code execution tool if available.
32
30
 
33
31
  1. Search deeply & verify rigorously:
34
- - Start broad and consult multiple sources, running searches in parallel where possible.
32
+ - Do not make up information - if the information cannot be confirmed with rigorous logic or reliable sources, do not include it in your response.
33
+ - Start broad and consult multiple sources, running all searches in parallel to save time.
35
34
  - Consult all available sources and cross-reference with specific searches before responding.
35
+ - If a tool fails or has a technical difficulty, try the backup tool automatically before giving up or reporting the error.
36
36
 
37
37
  2. Plan & sequence before acting:
38
38
  - Review the toolset first.
39
- - For multi-step or complex tasks, draft a clear plan (use the PlanMultiStepTask or other reasoning tool) and assign tool calls to each step.
39
+ - For multi-step or complex tasks, draft a clear plan and assign tool calls to each step.
40
40
 
41
41
  3. Escalate & iterate:
42
42
  - Don't settle for the first plausible answer—dig until the response is complete, corroborated, and clear.
@@ -61,7 +61,7 @@ When searching for news, you must complete the following steps:
61
61
 
62
62
  1. Triangulate
63
63
  - Run multiple, parallel queries across all applicable sources.
64
- - Request about double the number of results you want to share, then select the best results.
64
+ - Request at least double the number of results you want to share, then select the best results.
65
65
  - Confirm that multiple sources tell the same story.
66
66
 
67
67
  2. Check Freshness
@@ -102,7 +102,7 @@ token1 | token2 (OR operator - either token may appear (also the default
102
102
 
103
103
  # AI Search Syntax
104
104
 
105
- When creating a query string for your non-Bing search tools, you can use the following AI Search syntax. Important: these tools do not support AND, OR, or NOT strings as operators - you MUST use the syntax below. E.g. you cannot use "term1 AND term2", you must use "term1 + term2".
105
+ When creating a query string for your non-Bing, index-based search tools, you can use the following AI Search syntax. Important: these tools do not support AND, OR, or NOT strings as operators - you MUST use the syntax below. E.g. you cannot use "term1 AND term2", you must use "term1 + term2".
106
106
 
107
107
  token1 + token2 (AND operator - both tokens must appear)
108
108
  token1 | token2 (OR operator - either token may appear (also the default if no operator is specified))
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.3.59",
3
+ "version": "1.3.61",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -0,0 +1,13 @@
1
+ // bing_afagent.js
2
+ // Web search tool
3
+
4
+ export default {
5
+ inputParameters: {
6
+ text: ``,
7
+ },
8
+ timeout: 400,
9
+ enableDuplicateRequests: false,
10
+ model: 'azure-bing-agent',
11
+ useInputChunking: false
12
+ };
13
+
@@ -17,4 +17,8 @@ export default {
17
17
  useInputChunking: false,
18
18
  enableDuplicateRequests: false,
19
19
  timeout: 600,
20
+ geminiSafetySettings: [{category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
21
+ {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
22
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
23
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}],
20
24
  }
@@ -61,7 +61,7 @@ export default {
61
61
  const errorMessages = Array.isArray(resolver.errors)
62
62
  ? resolver.errors.map(err => err.message || err)
63
63
  : [resolver.errors.message || resolver.errors];
64
- return JSON.stringify({ _type: "SearchError", value: errorMessages });
64
+ return JSON.stringify({ _type: "SearchError", value: errorMessages, recoveryMessage: "This tool failed. You should try the backup tool for this function." });
65
65
  }
66
66
 
67
67
  const parsedResponse = JSON.parse(response);
@@ -0,0 +1,141 @@
1
+ // sys_tool_bing_search_afagent.js
2
+ // Tool pathway that handles Bing web search functionality with minimal parsing
3
+ import { callPathway } from '../../../../lib/pathwayTools.js';
4
+ import logger from '../../../../lib/logger.js';
5
+ import { config } from '../../../../config.js';
6
+ import { getSearchResultId } from '../../../../lib/util.js';
7
+
8
+ export default {
9
+ prompt: [],
10
+ timeout: 300,
11
+ toolDefinition: {
12
+ type: "function",
13
+ icon: "🕸️",
14
+ function: {
15
+ name: "SearchInternetBackup",
16
+ description: "This tool allows you to search sources on the internet by calling another agent that has Bing search access. Use this for current events, news, fact-checking, and information requiring citation. This is a backup tool for when the other internet search tools fail - it is slower so try to use the other tools first and always call this tool in parallel if you have several searches to do.",
17
+ parameters: {
18
+ type: "object",
19
+ properties: {
20
+ text: {
21
+ type: "string",
22
+ description: "The complete natural language prompt describing what you want to search for. This is going to an AI agent that has Bing search access - you can be as detailed or general as you want."
23
+ },
24
+ userMessage: {
25
+ type: "string",
26
+ description: "A user-friendly message that describes what you're doing with this tool"
27
+ }
28
+ },
29
+ required: ["text", "userMessage"]
30
+ }
31
+ }
32
+ },
33
+
34
+ executePathway: async ({args, runAllPrompts, resolver}) => {
35
+
36
+ // Check if Azure Service Principal credentials are available
37
+ const servicePrincipalAvailable = !!config.getEnv()["AZURE_SERVICE_PRINCIPAL_CREDENTIALS"];
38
+ if (!servicePrincipalAvailable) {
39
+ throw new Error("Service Principal for Bing Search Agent is not available!");
40
+ }
41
+
42
+ try {
43
+ // Call the Bing search pathway
44
+ //remove model from args as bing_afagent has model in its own
45
+ const { model, ...restArgs } = args;
46
+ const rawResponse = await callPathway('bing_afagent', {
47
+ ...restArgs,
48
+ }, resolver);
49
+
50
+ // Add error handling for malformed JSON
51
+ let response;
52
+ try {
53
+ response = JSON.parse(rawResponse);
54
+ } catch (parseError) {
55
+ logger.error(`Failed to parse bing_afagent response as JSON: ${parseError.message}`);
56
+ logger.error(`Raw response: ${rawResponse}`);
57
+ throw new Error(`Invalid JSON response from bing_afagent: ${parseError.message}`);
58
+ }
59
+
60
+ if (resolver.errors && resolver.errors.length > 0) {
61
+ const errorMessages = Array.isArray(resolver.errors)
62
+ ? resolver.errors.map(err => err.message || err)
63
+ : [resolver.errors.message || resolver.errors];
64
+ return JSON.stringify({ _type: "SearchError", value: errorMessages });
65
+ }
66
+
67
+ // Transform response to match expected SearchResponse format
68
+ function transformToSearchResponse(response) {
69
+ let valueText = response.value || '';
70
+ const annotations = response.annotations || [];
71
+
72
+ // Create a mapping from citation text to search result IDs
73
+ const citationToIdMap = new Map();
74
+ const citationPattern = /【\d+:\d+†source】/g;
75
+
76
+ // Replace citation markers with search result IDs
77
+ valueText = valueText.replace(citationPattern, (match) => {
78
+ if (!citationToIdMap.has(match)) {
79
+ citationToIdMap.set(match, getSearchResultId());
80
+ }
81
+ return `:cd_source[${citationToIdMap.get(match)}]`;
82
+ });
83
+
84
+ // Transform annotations to search result objects
85
+ const searchResults = annotations.map(annotation => {
86
+ if (annotation.type === "url_citation" && annotation.url_citation) {
87
+ const citationText = annotation.text;
88
+ const searchResultId = citationToIdMap.get(citationText) || getSearchResultId();
89
+
90
+ return {
91
+ searchResultId: searchResultId,
92
+ title: annotation.url_citation.title || '',
93
+ url: annotation.url_citation.url || '',
94
+ content: annotation.url_citation.title || annotation.url_citation.url || '', // Individual result content
95
+ path: '',
96
+ wireid: '',
97
+ source: '',
98
+ slugline: '',
99
+ date: ''
100
+ };
101
+ }
102
+ return null;
103
+ }).filter(result => result !== null);
104
+
105
+ // If no annotations, create a single search result with the content
106
+ if (searchResults.length === 0) {
107
+ searchResults.push({
108
+ searchResultId: getSearchResultId(),
109
+ title: '',
110
+ url: '',
111
+ content: valueText, // Use the full transformed text as content
112
+ path: '',
113
+ wireid: '',
114
+ source: '',
115
+ slugline: '',
116
+ date: ''
117
+ });
118
+ }
119
+
120
+ return {
121
+ transformedText: valueText, // The full text with citations replaced
122
+ searchResults: searchResults // Individual search results for citation extraction
123
+ };
124
+ }
125
+
126
+ const transformedData = transformToSearchResponse(response);
127
+
128
+ resolver.tool = JSON.stringify({ toolUsed: "SearchInternetBackup" });
129
+
130
+ // Return the full transformed text as the main result, and include search results for citation extraction
131
+ return JSON.stringify({
132
+ _type: "SearchResponse",
133
+ value: transformedData.searchResults,
134
+ text: transformedData.transformedText // The full transformed text with citations
135
+ });
136
+ } catch (e) {
137
+ logger.error(`Error in Bing search: ${e}`);
138
+ throw e;
139
+ }
140
+ }
141
+ };
@@ -12,7 +12,7 @@ export default {
12
12
  icon: "🌎",
13
13
  function: {
14
14
  name: "FetchWebPageContentJina",
15
- description: "This tool allows you to fetch and extract the text content from any webpage using the Jina API. This is a great fallback for web page content if you don't get a good enough response from your other browser tool.",
15
+ description: "This tool allows you to fetch and extract the text content from any webpage using the Jina API. This is a great backup tool for web page content if you don't get a good enough response from your other browser tool or are blocked by a website.",
16
16
  parameters: {
17
17
  type: "object",
18
18
  properties: {
@@ -0,0 +1,82 @@
1
+ // sys_tool_mermaid.js
2
+ // Entity tool that provides advanced mermaid charting capabilities
3
+
4
+ import { Prompt } from '../../../../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ {"role": "system", "content":`You are the part of an AI entity named {{aiName}} that creates mermaid charts. Follow the user's detailed instructions and create a mermaid chart that meets the user's needs.
11
+
12
+ Mermaid Charts Instructions:
13
+
14
+ You are using Mermaid 11.6 with the xychart-beta extension, so you can write all standard Mermaid chart types in a markdown block (flowcharts, sequence diagrams, etc.) as well as bar charts and line charts using the xychart-beta extension.
15
+
16
+ Here is some example code of the xychart-beta extension that combines both bar and line functions:
17
+
18
+ \`\`\`mermaid
19
+ xychart-beta
20
+ title "Sales Revenue"
21
+ x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
22
+ y-axis "Revenue (in $)" 4000 --> 11000
23
+ bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
24
+ line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
25
+ \`\`\`
26
+
27
+ Mermaid is very sensitive to syntax errors, so make sure you check your chart definitions before finalizing your response. Some things to check for:
28
+
29
+ - All [] labels must be either quoted strings OR HTML-safe (no raw \\n or other special characters)
30
+ - No strings (e.g. null) in number series data
31
+ - Every subgraph has a matching end
32
+ - No lone arrows
33
+ - Use comments (%%) instead of stray text lines
34
+
35
+ Return only the mermaid chart markdown block and separate markdown for the chart key if necessary, with no other notes or comments.
36
+
37
+ {{renderTemplate AI_DATETIME}}`},
38
+ "{{chatHistory}}"
39
+ ]}),
40
+ ],
41
+ inputParameters: {
42
+ chatHistory: [{role: '', content: []}],
43
+ contextId: ``,
44
+ aiName: "Jarvis",
45
+ language: "English",
46
+ },
47
+ model: 'oai-gpt41',
48
+ useInputChunking: false,
49
+ enableDuplicateRequests: false,
50
+ timeout: 600,
51
+ toolDefinition: [{
52
+ type: "function",
53
+ icon: "📊",
54
+ function: {
55
+ name: "CreateMermaidChart",
56
+ description: "Creates a Mermaid chart in markdown format to visualize data or concepts. Call this tool any time you need to create a Mermaid chart as it will ensure that the chart is properly formatted and syntax-checked.",
57
+ parameters: {
58
+ type: "object",
59
+ properties: {
60
+ detailedInstructions: {
61
+ type: "string",
62
+ description: "Detailed instructions about what you need the tool to do"
63
+ },
64
+ userMessage: {
65
+ type: "string",
66
+ description: "A user-friendly message that describes what you're doing with this tool"
67
+ }
68
+ },
69
+ required: ["detailedInstructions", "userMessage"]
70
+ }
71
+ }
72
+ }],
73
+
74
+ executePathway: async ({args, runAllPrompts, resolver}) => {
75
+ if (args.detailedInstructions) {
76
+ args.chatHistory.push({role: "user", content: args.detailedInstructions});
77
+ }
78
+ let result = await runAllPrompts({ ...args, stream: false });
79
+ resolver.tool = JSON.stringify({ toolUsed: "coding" });
80
+ return result;
81
+ }
82
+ }
@@ -22,6 +22,10 @@ export default {
22
22
  useInputChunking: false,
23
23
  enableDuplicateRequests: false,
24
24
  timeout: 600,
25
+ geminiSafetySettings: [{category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
26
+ {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
27
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
28
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}],
25
29
  toolDefinition: [{
26
30
  type: "function",
27
31
  icon: "📄",
@@ -182,6 +182,10 @@ export default {
182
182
  },
183
183
  // model: 'oai-gpt41',
184
184
  model: 'gemini-pro-25-vision',
185
+ geminiSafetySettings: [{category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
186
+ {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
187
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
188
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}],
185
189
  timeout: 600,
186
190
  stream: true,
187
191
  }
@@ -62,6 +62,10 @@ export default {
62
62
  },
63
63
  timeout: 3600, // in seconds
64
64
  enableDuplicateRequests: false,
65
+ geminiSafetySettings: [{category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
66
+ {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
67
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
68
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}],
65
69
 
66
70
  executePathway: async ({args, runAllPrompts, resolver}) => {
67
71
  let intervalId;
@@ -33,6 +33,7 @@ import ApptekTranslatePlugin from './plugins/apptekTranslatePlugin.js';
33
33
  import GoogleTranslatePlugin from './plugins/googleTranslatePlugin.js';
34
34
  import GroqChatPlugin from './plugins/groqChatPlugin.js';
35
35
  import VeoVideoPlugin from './plugins/veoVideoPlugin.js';
36
+ import AzureFoundryAgentsPlugin from './plugins/azureFoundryAgentsPlugin.js';
36
37
 
37
38
  class ModelExecutor {
38
39
  constructor(pathway, model) {
@@ -133,6 +134,9 @@ class ModelExecutor {
133
134
  case 'VEO-VIDEO':
134
135
  plugin = new VeoVideoPlugin(pathway, model);
135
136
  break;
137
+ case 'AZURE-FOUNDRY-AGENTS':
138
+ plugin = new AzureFoundryAgentsPlugin(pathway, model);
139
+ break;
136
140
  default:
137
141
  throw new Error(`Unsupported model type: ${model.type}`);
138
142
  }