@cognigy/rest-api-client 2025.12.0 → 2025.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/CHANGELOG.md +10 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -1
  4. package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  5. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  6. package/build/shared/charts/descriptors/index.js +5 -0
  7. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  8. package/build/shared/charts/descriptors/message/question/question.js +12 -1
  9. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  10. package/build/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  11. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +71 -175
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
  14. package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
  15. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  16. package/build/shared/charts/descriptors/service/index.js +11 -1
  17. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +959 -0
  18. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
  19. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
  20. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
  21. package/build/shared/constants.js +1 -5
  22. package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
  23. package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  24. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  25. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  26. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  27. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  28. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  29. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
  30. package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
  31. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  32. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  35. package/build/shared/interfaces/security/IPermission.js +4 -0
  36. package/build/shared/interfaces/security/IRole.js +5 -1
  37. package/build/shared/interfaces/security/index.js +1 -1
  38. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  39. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -1
  40. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  41. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  42. package/dist/esm/shared/charts/descriptors/index.js +6 -1
  43. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  44. package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
  45. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  46. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  47. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +72 -176
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
  50. package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
  51. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  52. package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
  53. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +946 -0
  54. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
  55. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
  56. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
  57. package/dist/esm/shared/constants.js +1 -5
  58. package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
  59. package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  60. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  61. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  62. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  63. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  64. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  65. package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
  66. package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  68. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  69. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/security/IPermission.js +4 -0
  72. package/dist/esm/shared/interfaces/security/IRole.js +5 -1
  73. package/dist/esm/shared/interfaces/security/index.js +1 -1
  74. package/package.json +1 -1
  75. package/types/index.d.ts +2093 -1927
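Most of the churn below comes from two changes the file list already hints at: the inline tool-building loop in aiAgentJob.js has been extracted into a shared helpers/createToolDefinitions.js (also consumed by the new llmPrompt v2 node family), and the AI Agent Job node gains a debugLogLLMLatency toggle. As a reading aid, here is a minimal TypeScript sketch of the tool-definition objects that helper assembles; the interface names are illustrative only (they are not exported by the package), while the field names are taken from the hunks below.

// Illustrative shapes only; field names mirror the objects built in
// helpers/createToolDefinitions.js further down in this diff.
interface ToolFunctionDefinition {
    name: string;            // parsed, validated tool ID (alphanumerics, dashes, underscores)
    description: string;
    parameters?: unknown;    // JSON schema from child.config.parameters or an MCP inputSchema
    strict?: boolean;        // only set when the node's useStrict flag is enabled
}

interface ToolDefinition {
    type: "function";
    function: ToolFunctionDefinition;
}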
@@ -5,9 +5,10 @@ import { randomUUID } from 'crypto';
  import { setSessionConfig } from "../../voice/mappers/setSessionConfig.mapper";
  import { voiceConfigParamsToVoiceSettings } from "../../voice/mappers/setSessionConfig.mapper";
  import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebugMode";
- import { createSystemMessage, validateToolId, getCognigyBrandMessage } from "./helpers/createSystemMessage";
+ import { createSystemMessage, getCognigyBrandMessage } from "./helpers/createSystemMessage";
  import { generateSearchPrompt } from "./helpers/generateSearchPrompt";
  import { getUserMemory } from "./helpers/getUserMemory";
+ import { createToolDefinitions } from "./helpers/createToolDefinitions";
  import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
  export const AI_AGENT_JOB = createNodeDescriptor({
  type: "aiAgentJob",
@@ -65,7 +66,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  {
  key: "name",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__JOB_NAME__LABEL",
- type: "cognigyText",
+ type: "cognigyLLMText",
  defaultValue: "Customer Support Specialist",
  params: {
  required: true,
@@ -498,6 +499,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_CONFIG__DESCRIPTION",
  defaultValue: true
  },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
+ defaultValue: false
+ },
  {
  key: "storeLocation",
  type: "select",
@@ -817,7 +825,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  "debugResult",
  "debugLogTokenCount",
  "debugLogSystemPrompt",
- "debugLogToolDefinitions"
+ "debugLogToolDefinitions",
+ "debugLogLLMLatency"
  ],
  }
  ],
@@ -838,9 +847,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  ],
  tags: ["ai", "aiAgent"],
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21;
  const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
  try {
  if (!aiAgent) {
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -888,7 +897,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
  }
  }
- const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+ const _22 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _22, cleanedProfile = __rest(_22, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
  /**
  * ----- Knowledge Search Section -----
@@ -1013,149 +1022,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1013
1022
  const debugSystemMessage = (_r = (_q = systemMessage[0]) === null || _q === void 0 ? void 0 : _q.content) === null || _r === void 0 ? void 0 : _r.replace(`${getCognigyBrandMessage()}\n`, "");
1014
1023
  (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, debugSystemMessage, "UI__DEBUG_MODE__AI_AGENT_JOB__SYSTEM_PROMPT__HEADER");
1015
1024
  }
1016
- // Create Tools JSON
1017
- /** This is the list of tools that are used in the AI Agent Job */
1018
- const tools = [];
1019
- /** Array of tool IDs for deduping */
1020
- const toolIds = [];
1021
- /** Map of MCP tool IDs to their respective node IDs they were loaded from */
1022
- const toolMap = new Map();
1023
- /** Array of tool names for listing in the debug message */
1024
- const toolNames = [];
1025
- for (const child of childConfigs) {
1026
- if (child.type === "aiAgentJobDefault") {
1027
- continue;
1028
- }
1029
- const toolId = child.config.toolId;
1030
- if (child.type === "aiAgentJobTool" &&
1031
- (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
1032
- if (!toolId) {
1033
- throw new Error(`Tool ID is missing in Tool Node configuration.`);
1034
- }
1035
- const parsedToolId = api.parseCognigyScriptText(toolId);
1036
- if (!validateToolId(parsedToolId)) {
1037
- throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
1038
- }
1039
- if (toolIds.includes(parsedToolId)) {
1040
- throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
1041
- }
1042
- toolIds.push(parsedToolId);
1043
- toolNames.push(parsedToolId);
1044
- const tool = {
1045
- type: "function",
1046
- function: {
1047
- name: parsedToolId,
1048
- description: api.parseCognigyScriptText(child.config.description),
1049
- },
1050
- };
1051
- if (useStrict) {
1052
- tool.function.strict = true;
1053
- }
1054
- if (child.config.useParameters) {
1055
- tool.function.parameters = child.config.parameters;
1056
- }
1057
- tools.push(tool);
1058
- }
1059
- if (child.type === "aiAgentJobMCPTool" &&
1060
- (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
1061
- if (!child.config.mcpServerUrl) {
1062
- throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
1063
- }
1064
- const mcpServerUrl = child.config.mcpServerUrl;
1065
- const timeout = child.config.timeout;
1066
- const cacheTools = child.config.cacheTools;
1067
- const sendDebug = child.config.debugMessageFetchedTools;
1068
- const toolFilter = child.config.toolFilter;
1069
- let mcpTools = null;
1070
- try {
1071
- mcpTools = yield api.fetchMcpTools({
1072
- mcpServerUrl,
1073
- timeout,
1074
- cacheTools,
1075
- });
1076
- }
1077
- catch (error) {
1078
- const errorDetails = error instanceof Error
1079
- ? {
1080
- name: error.name,
1081
- message: error.message,
1082
- }
1083
- : error;
1084
- (_t = api.logDebugError) === null || _t === void 0 ? void 0 : _t.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
1085
- }
1086
- if (mcpTools) {
1087
- if (sendDebug) {
1088
- if (mcpTools.length === 0) {
1089
- (_u = api.logDebugMessage) === null || _u === void 0 ? void 0 : _u.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
1090
- }
1091
- if (mcpTools.length > 0) {
1092
- const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
1093
- mcpTools.forEach((tool) => {
1094
- messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
1095
- if (child.config.debugMessageParameters && tool.inputSchema) {
1096
- messageLines.push(` <b>Parameters</b>:`);
1097
- Object.keys(tool.inputSchema.properties).forEach((key) => {
1098
- const parameter = tool.inputSchema.properties[key];
1099
- const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
1100
- if (parameter.description) {
1101
- messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
1102
- }
1103
- else {
1104
- messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
1105
- }
1106
- });
1107
- }
1108
- });
1109
- (_v = api.logDebugMessage) === null || _v === void 0 ? void 0 : _v.call(api, messageLines.join("\n"), "MCP Tool");
1110
- }
1111
- }
1112
- const filteredMcpTools = mcpTools.filter((tool) => {
1113
- if (toolFilter && toolFilter !== "none") {
1114
- if (toolFilter === "whitelist" && child.config.whitelist) {
1115
- const whitelist = child.config.whitelist.map((item) => item.trim());
1116
- return whitelist.includes(tool.name);
1117
- }
1118
- else if (toolFilter === "blacklist") {
1119
- // If the blacklist is falsy, all tools are allowed
1120
- if (!child.config.blacklist) {
1121
- return true;
1122
- }
1123
- const blacklist = child.config.blacklist.map((item) => item.trim());
1124
- return !blacklist.includes(tool.name);
1125
- }
1126
- }
1127
- else {
1128
- return true;
1129
- }
1130
- });
1131
- const structuredMcpTools = [];
1132
- filteredMcpTools.forEach((tool) => {
1133
- var _a;
1134
- if (toolIds.includes(tool.name)) {
1135
- (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
1136
- return;
1137
- }
1138
- // add tool to the list of tool ids to prevent duplicates
1139
- toolIds.push(tool.name);
1140
- toolNames.push(`${tool.name} (${child.config.name})`);
1141
- toolMap.set(tool.name, child.id);
1142
- const structuredTool = {
1143
- type: "function",
1144
- function: {
1145
- name: tool.name,
1146
- description: tool.description,
1147
- },
1148
- };
1149
- if (tool.inputSchema) {
1150
- structuredTool.function.parameters = tool.inputSchema;
1151
- }
1152
- structuredMcpTools.push(structuredTool);
1153
- });
1154
- tools.push(...structuredMcpTools);
1155
- }
1156
- }
1157
- }
1158
- ;
1025
+ const { toolIds, toolNames, toolMap, tools } = yield createToolDefinitions(childConfigs, api, useStrict);
1159
1026
  // we only add this tool if at least one knowledge source is enabled
1160
1027
  if (isOnDemandKnowledgeStoreConfigured) {
1161
1028
  const knowledgeTool = {
@@ -1187,7 +1054,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1187
1054
  tools.push(knowledgeTool);
1188
1055
  }
1189
1056
  if (debugLogToolDefinitions) {
1190
- (_w = api.logDebugMessage) === null || _w === void 0 ? void 0 : _w.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
1057
+ (_t = api.logDebugMessage) === null || _t === void 0 ? void 0 : _t.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
1191
1058
  }
1192
1059
  // Optional Debug Message with the config
1193
1060
  if (debugConfig) {
@@ -1196,10 +1063,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1196
1063
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
1197
1064
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
1198
1065
  // Safety settings
1199
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_x = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _x === void 0 ? void 0 : _x.avoidHarmfulContent}`);
1200
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_y = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _y === void 0 ? void 0 : _y.avoidUngroundedContent}`);
1201
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_z = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _z === void 0 ? void 0 : _z.avoidCopyrightInfringements}`);
1202
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_0 = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _0 === void 0 ? void 0 : _0.preventJailbreakAndManipulation}`);
1066
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidHarmfulContent}`);
1067
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidUngroundedContent}`);
1068
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.avoidCopyrightInfringements}`);
1069
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_x = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _x === void 0 ? void 0 : _x.preventJailbreakAndManipulation}`);
1203
1070
  // Tools
1204
1071
  if (toolNames.length > 0) {
1205
1072
  messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
@@ -1255,7 +1122,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1255
1122
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1256
1123
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1257
1124
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1258
- (_1 = api.logDebugMessage) === null || _1 === void 0 ? void 0 : _1.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1125
+ (_y = api.logDebugMessage) === null || _y === void 0 ? void 0 : _y.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1259
1126
  }
1260
1127
  const transcript = yield api.getTranscript({
1261
1128
  limit: 50,
@@ -1269,17 +1136,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  transcript.length > 0 &&
  transcript[transcript.length - 1].role === TranscriptRole.USER) {
  const userInput = transcript[transcript.length - 1];
- const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_2 = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _2 === void 0 ? void 0 : _2.text) || input.text}`;
+ const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_z = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _z === void 0 ? void 0 : _z.text) || input.text}`;
  transcript[transcript.length - 1].payload.text = enhancedInput;
  }
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
  const _messageId = randomUUID();
  const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
  // Temp fix to override the transcript if needed
- transcript: ((_3 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _3 === void 0 ? void 0 : _3.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ transcript: ((_0 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _0 === void 0 ? void 0 : _0.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
  var _a;
  text = isStreamingChannel ? text : text.trim();
  if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
  // if we got text, we output it, but prevent it from being added to the transcript
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
  _cognigy: {
@@ -1300,15 +1171,40 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  };
  }
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
- (_4 = api.setAnalyticsData) === null || _4 === void 0 ? void 0 : _4.call(api, "understood", "true");
+ (_1 = api.setAnalyticsData) === null || _1 === void 0 ? void 0 : _1.call(api, "understood", "true");
  input.understood = true;
- const fullLlmResult = yield ((_5 = api.runGenerativeAIPrompt) === null || _5 === void 0 ? void 0 : _5.call(api, llmPromptOptions, "aiAgent"));
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
+ let firstOutputTime = null;
+ const fullLlmResult = yield ((_2 = api.runGenerativeAIPrompt) === null || _2 === void 0 ? void 0 : _2.call(api, llmPromptOptions, "aiAgent"));
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
  const tokenUsage = fullLlmResult.tokenUsage;
  // Send optional debug message with token usage
  if (debugLogTokenCount && tokenUsage) {
- (_6 = api.logDebugMessage) === null || _6 === void 0 ? void 0 : _6.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
+ (_4 = api.logDebugMessage) === null || _4 === void 0 ? void 0 : _4.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
  }
  // Identify if the result is a tool call
  // If response is a tool call, set next node for Tools
@@ -1323,7 +1219,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1323
1219
  isMcpToolCall = true;
1324
1220
  }
1325
1221
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
1326
- (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1222
+ (_5 = api.logDebugError) === null || _5 === void 0 ? void 0 : _5.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1327
1223
  }
1328
1224
  // Add last tool call to session state for loading it from Tool Answer Node
1329
1225
  api.updateSessionStateValues({
@@ -1331,21 +1227,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1331
1227
  flow: flowReferenceId,
1332
1228
  node: nodeId,
1333
1229
  } }, (isMcpToolCall && {
1334
- mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
1335
- timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
1230
+ mcpServerUrl: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.mcpServerUrl,
1231
+ timeout: (_7 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _7 === void 0 ? void 0 : _7.timeout,
1336
1232
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
1337
1233
  })), { toolCall: mainToolCall }),
1338
1234
  });
1339
1235
  // if there are any parameters/arguments, add them to the input slots
1340
1236
  if (mainToolCall.function.arguments) {
1341
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
1237
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_9 = (_8 = input.aiAgent) === null || _8 === void 0 ? void 0 : _8.toolArgs) !== null && _9 !== void 0 ? _9 : {}), mainToolCall.function.arguments) });
1342
1238
  }
1343
1239
  // Debug Message for Tool Calls, configured in the Tool Node
1344
- if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
1240
+ if ((_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.debugMessage) {
1345
1241
  const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
1346
1242
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
1347
1243
  // Arguments / Parameters Slots
1348
- const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
1244
+ const slots = ((_11 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _11 === void 0 ? void 0 : _11.arguments) && Object.keys(mainToolCall.function.arguments);
1349
1245
  const hasSlots = slots && slots.length > 0;
1350
1246
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1351
1247
  if (hasSlots) {
@@ -1360,7 +1256,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1360
1256
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1361
1257
  });
1362
1258
  }
1363
- (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1259
+ (_12 = api.logDebugMessage) === null || _12 === void 0 ? void 0 : _12.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1364
1260
  }
1365
1261
  if (toolChild) {
1366
1262
  api.setNextNode(toolChild.id);
@@ -1385,11 +1281,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1385
1281
  }
1386
1282
  // Optionally output the result immediately
1387
1283
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1388
- yield ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
1284
+ yield ((_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, llmResult.result, {}));
1389
1285
  }
1390
1286
  else if (llmResult.finishReason && llmPromptOptions.stream) {
1391
1287
  // send the finishReason as last output for a stream
1392
- (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
1288
+ (_14 = api.output) === null || _14 === void 0 ? void 0 : _14.call(api, "", {
1393
1289
  _cognigy: {
1394
1290
  _preventTranscript: true,
1395
1291
  _messageId,
@@ -1412,7 +1308,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1412
1308
  }
1413
1309
  // Add response to Cognigy Input/Context for further usage
1414
1310
  if (storeLocation === "context") {
1415
- (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
1311
+ (_15 = api.addToContext) === null || _15 === void 0 ? void 0 : _15.call(api, contextKey, llmResult, "simple");
1416
1312
  }
1417
1313
  else if (storeLocation === "input") {
1418
1314
  api.addToInput(inputKey, llmResult);
@@ -1425,14 +1321,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1425
1321
  const errorDetails = {
1426
1322
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1427
1323
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1428
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
1324
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_16 = error.originalErrorDetails) === null || _16 === void 0 ? void 0 : _16.message),
1429
1325
  };
1430
- (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1326
+ (_17 = api.emitEvent) === null || _17 === void 0 ? void 0 : _17.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1431
1327
  if (logErrorToSystem) {
1432
- (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
1328
+ (_18 = api.log) === null || _18 === void 0 ? void 0 : _18.call(api, "error", JSON.stringify(errorDetails));
1433
1329
  }
1434
1330
  if (errorHandling !== "stop") {
1435
- (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1331
+ (_19 = api.logDebugError) === null || _19 === void 0 ? void 0 : _19.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1436
1332
  }
1437
1333
  if (storeErrorInInput) {
1438
1334
  input.aiAgent = input.aiAgent || {};
@@ -1441,7 +1337,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1441
1337
  if (errorHandling === "continue") {
1442
1338
  // output the timeout message
1443
1339
  if (errorMessage) {
1444
- yield ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
1340
+ yield ((_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, errorMessage, null));
1445
1341
  }
1446
1342
  // Set default node as next node
1447
1343
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1453,7 +1349,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1453
1349
  if (!errorHandlingGotoTarget) {
1454
1350
  throw new Error("GoTo Target is required");
1455
1351
  }
1456
- if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
1352
+ if (!((_21 = api.checkThink) === null || _21 === void 0 ? void 0 : _21.call(api, nodeId))) {
1457
1353
  api.resetNextNodes();
1458
1354
  yield api.executeFlow({
1459
1355
  flowNode: {
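Taken together, the hunks above wire the new debugLogLLMLatency toggle into the LLM call: a start timestamp is taken right before api.runGenerativeAIPrompt, the stream handler records when the first output chunk arrives, and both values are logged once the call returns. Below is a condensed, self-contained TypeScript sketch of that flow; runPrompt and logDebug are hypothetical stand-ins for api.runGenerativeAIPrompt and api.logDebugMessage, so treat this as an illustration of the timing logic rather than the package's actual code.

interface LlmResult {
    finishReason?: string;
    toolCalls?: unknown[];
}

async function runWithLatencyDebug(
    runPrompt: (onData: (text: string) => void) => Promise<LlmResult>,
    logDebug: (message: string, header: string) => void,
    options: { debugLogLLMLatency: boolean; streaming: boolean },
): Promise<LlmResult> {
    const llmStartTime = options.debugLogLLMLatency ? Date.now() : 0;
    let firstOutputTime: number | null = null;

    const result = await runPrompt((text) => {
        // Mirrors the streamOnDataHandler change: remember when the first chunk arrived.
        if (options.debugLogLLMLatency && firstOutputTime === null && text) {
            firstOutputTime = Date.now();
        }
    });

    if (options.debugLogLLMLatency) {
        const llmEndTime = Date.now();
        const lines: string[] = [];
        let timeToFirstOutput: string;
        if (result.finishReason === "tool_calls" && (result.toolCalls?.length ?? 0) > 0) {
            timeToFirstOutput = "(tool call)";
        } else if (firstOutputTime === null) {
            timeToFirstOutput = "(no output)";
        } else {
            timeToFirstOutput = `${firstOutputTime - llmStartTime}ms`;
        }
        if (options.streaming) {
            lines.push(`Time to first output: ${timeToFirstOutput}`);
        }
        lines.push(`LLM latency: ${llmEndTime - llmStartTime}ms`);
        logDebug(lines.join("\n"), "AI Agent Job timing");
    }
    return result;
}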
@@ -26,7 +26,7 @@ export const AI_AGENT_JOB_TOOL = createNodeDescriptor({
  key: "toolId",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__LABEL",
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__DESCRIPTION",
- type: "cognigyText",
+ type: "cognigyLLMText",
  defaultValue: "unlock_account",
  params: {
  required: true,
@@ -39,7 +39,7 @@ export const AI_AGENT_JOB_TOOL = createNodeDescriptor({
  key: "description",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__LABEL",
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__DESCRIPTION",
- type: "cognigyText",
+ type: "cognigyLLMText",
  defaultValue: "This tool unlocks a locked user account.",
  params: {
  required: true,
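The new createToolDefinitions helper shown next (file 49 in the list above) reads the child configurations of Tool and MCP Tool nodes. For orientation, here is a hedged TypeScript sketch of the config fields it touches; the field names are taken from the code below, but the interfaces themselves are illustrative and not exported by the package.

// Illustrative shapes only; not part of the package's public API.
interface ToolChildConfig {
    toolId: string;            // CognigyScript text, validated and de-duplicated
    description: string;
    condition?: string;        // CognigyScript condition; the tool is skipped when it evaluates falsy
    useParameters?: boolean;
    parameters?: unknown;      // JSON schema forwarded as function.parameters
}

interface McpToolChildConfig {
    name: string;              // node name, used in debug messages and toolNames entries
    mcpServerUrl: string;
    timeout?: number;
    cacheTools?: boolean;
    condition?: string;
    toolFilter?: "none" | "whitelist" | "blacklist";
    whitelist?: string[];
    blacklist?: string[];
    debugMessageFetchedTools?: boolean;
    debugMessageParameters?: boolean;
}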
@@ -0,0 +1,172 @@
1
+ import { __awaiter } from "tslib";
2
+ import { validateToolId } from "./createSystemMessage";
3
+ /**
4
+ * Creates the tool definitions for the AI Agent Job and LLM Prompt v2 Nodes
5
+ * @param childConfigs Child node configurations
6
+ * @param api Cognigy API
7
+ * @param useStrict Whether to use strict mode for the tools
8
+ * @returns An object containing the tool definitions
9
+ */
10
+ export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter(void 0, void 0, void 0, function* () {
11
+ var _a, _b, _c;
12
+ // Create Tools JSON
13
+ /** This is the list of tools that are used in the AI Agent Job */
14
+ const tools = [];
15
+ /** Array of tool IDs for deduping */
16
+ const toolIds = [];
17
+ /** Map of MCP tool IDs to their respective node IDs they were loaded from */
18
+ const toolMap = new Map();
19
+ /** Array of tool names for listing in the debug message */
20
+ const toolNames = [];
21
+ // if no child configs are provided, return empty tool definitions
22
+ if (!childConfigs || childConfigs.length === 0) {
23
+ return {
24
+ toolIds: [],
25
+ toolNames: [],
26
+ toolMap: new Map(),
27
+ tools: [],
28
+ };
29
+ }
30
+ // Loop through all child nodes and create the tools
31
+ for (const child of childConfigs) {
32
+ if (child.type === "aiAgentJobDefault" || child.type === "llmPromptDefault") {
33
+ continue;
34
+ }
35
+ const toolId = child.config.toolId;
36
+ if ((child.type === "aiAgentJobTool" || child.type === "llmPromptTool") &&
37
+ (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
38
+ if (!toolId) {
39
+ throw new Error(`Tool ID is missing in Tool Node configuration.`);
40
+ }
41
+ const parsedToolId = api.parseCognigyScriptText(toolId);
42
+ if (!validateToolId(parsedToolId)) {
43
+ throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
44
+ }
45
+ if (toolIds.includes(parsedToolId)) {
46
+ throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
47
+ }
48
+ toolIds.push(parsedToolId);
49
+ toolNames.push(parsedToolId);
50
+ const tool = {
51
+ type: "function",
52
+ function: {
53
+ name: parsedToolId,
54
+ description: api.parseCognigyScriptText(child.config.description),
55
+ },
56
+ };
57
+ if (useStrict) {
58
+ tool.function.strict = true;
59
+ }
60
+ if (child.config.useParameters) {
61
+ tool.function.parameters = child.config.parameters;
62
+ }
63
+ tools.push(tool);
64
+ }
65
+ if ((child.type === "aiAgentJobMCPTool" || child.type === "llmPromptMCPTool") &&
66
+ (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
67
+ if (!child.config.mcpServerUrl) {
68
+ throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
69
+ }
70
+ const mcpServerUrl = child.config.mcpServerUrl;
71
+ const timeout = child.config.timeout;
72
+ const cacheTools = child.config.cacheTools;
73
+ const sendDebug = child.config.debugMessageFetchedTools;
74
+ const toolFilter = child.config.toolFilter;
75
+ let mcpTools = null;
76
+ try {
77
+ mcpTools = yield api.fetchMcpTools({
78
+ mcpServerUrl,
79
+ timeout,
80
+ cacheTools,
81
+ });
82
+ }
83
+ catch (error) {
84
+ const errorDetails = error instanceof Error
85
+ ? {
86
+ name: error.name,
87
+ message: error.message,
88
+ }
89
+ : error;
90
+ (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
91
+ }
92
+ if (mcpTools) {
93
+ if (sendDebug) {
94
+ if (mcpTools.length === 0) {
95
+ (_b = api.logDebugMessage) === null || _b === void 0 ? void 0 : _b.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
96
+ }
97
+ if (mcpTools.length > 0) {
98
+ const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
99
+ mcpTools.forEach((tool) => {
100
+ messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
101
+ if (child.config.debugMessageParameters && tool.inputSchema) {
102
+ messageLines.push(` <b>Parameters</b>:`);
103
+ Object.keys(tool.inputSchema.properties).forEach((key) => {
104
+ const parameter = tool.inputSchema.properties[key];
105
+ const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
106
+ if (parameter.description) {
107
+ messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
108
+ }
109
+ else {
110
+ messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
111
+ }
112
+ });
113
+ }
114
+ });
115
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, messageLines.join("\n"), "MCP Tool");
116
+ }
117
+ }
118
+ const filteredMcpTools = mcpTools.filter((tool) => {
119
+ if (toolFilter && toolFilter !== "none") {
120
+ if (toolFilter === "whitelist" && child.config.whitelist) {
121
+ const whitelist = child.config.whitelist.map((item) => item.trim());
122
+ return whitelist.includes(tool.name);
123
+ }
124
+ else if (toolFilter === "blacklist") {
125
+ // If the blacklist is falsy, all tools are allowed
126
+ if (!child.config.blacklist) {
127
+ return true;
128
+ }
129
+ const blacklist = child.config.blacklist.map((item) => item.trim());
130
+ return !blacklist.includes(tool.name);
131
+ }
132
+ }
133
+ else {
134
+ return true;
135
+ }
136
+ });
137
+ const structuredMcpTools = [];
138
+ filteredMcpTools.forEach((tool) => {
139
+ var _a;
140
+ if (toolIds.includes(tool.name)) {
141
+ (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
142
+ return;
143
+ }
144
+ // add tool to the list of tool ids to prevent duplicates
145
+ toolIds.push(tool.name);
146
+ toolNames.push(`${tool.name} (${child.config.name})`);
147
+ toolMap.set(tool.name, child.id);
148
+ const structuredTool = {
149
+ type: "function",
150
+ function: {
151
+ name: tool.name,
152
+ description: tool.description,
153
+ },
154
+ };
155
+ if (tool.inputSchema) {
156
+ structuredTool.function.parameters = tool.inputSchema;
157
+ }
158
+ structuredMcpTools.push(structuredTool);
159
+ });
160
+ tools.push(...structuredMcpTools);
161
+ }
162
+ }
163
+ }
164
+ ;
165
+ return {
166
+ toolIds,
167
+ toolNames,
168
+ toolMap,
169
+ tools,
170
+ };
171
+ });
172
+ //# sourceMappingURL=createToolDefinitions.js.map
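For reference, the helper above replaces the inline tool-building loop removed from aiAgentJob.js earlier in this diff, where it is now called as `const { toolIds, toolNames, toolMap, tools } = yield createToolDefinitions(childConfigs, api, useStrict);`. The TypeScript sketch below shows how that call can be driven in isolation; it is a hedged illustration, not documented usage: createToolDefinitions is an internal, untyped build artifact rather than a public export of @cognigy/rest-api-client, so the import path assumes a file sitting next to the helper and a tsconfig that tolerates untyped JS. The stubbed api only implements the members the helper touches for a plain (non-MCP) Tool child.

import { createToolDefinitions } from "./createToolDefinitions";

async function demo() {
    // Minimal stand-in for the Cognigy api object; real node handlers pass the full API.
    const api = {
        parseCognigyScriptText: (text: string) => text,
        parseCognigyScriptCondition: (_condition: string) => true,
        fetchMcpTools: async () => [],      // unused here: no MCP Tool children
        logDebugMessage: (_message: unknown, _header: string) => undefined,
        logDebugError: (_message: string) => undefined,
    };
    const childConfigs = [
        { id: "node-1", type: "llmPromptDefault", config: {} },
        {
            id: "node-2",
            type: "llmPromptTool",
            config: {
                toolId: "unlock_account",
                description: "This tool unlocks a locked user account.",
                useParameters: false,
            },
        },
    ];
    const { toolIds, toolNames, toolMap, tools } = await createToolDefinitions(childConfigs, api, false);
    // tools now holds one { type: "function", function: { name: "unlock_account", ... } } entry;
    // toolMap stays empty because only MCP-provided tools are recorded there.
    console.log(toolIds, toolNames, toolMap.size, tools);
}

void demo();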