@cognigy/rest-api-client 4.98.0 → 4.99.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/CHANGELOG.md +5 -0
  2. package/build/RestAPIClient.js +7 -0
  3. package/build/apigroups/AIOpsCenterAPIGroup_2_0.js +22 -0
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -8
  5. package/build/apigroups/index.js +3 -1
  6. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +0 -44
  7. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +0 -45
  8. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +0 -43
  9. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +0 -45
  10. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +6 -2
  11. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/mistralProviderConnection.js +11 -0
  12. package/build/shared/charts/descriptors/index.js +2 -0
  13. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -1
  14. package/build/shared/charts/descriptors/service/GPTPrompt.js +14 -4
  15. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +185 -63
  16. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +180 -0
  17. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +196 -0
  18. package/build/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +5 -1
  19. package/build/shared/charts/descriptors/service/index.js +5 -1
  20. package/build/shared/charts/descriptors/voice/mappers/hangup.mapper.js +9 -8
  21. package/build/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
  22. package/build/shared/charts/descriptors/voicegateway/nodes/hangup.js +1 -1
  23. package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +24 -2
  24. package/build/shared/interfaces/amqpInterface.js +3 -0
  25. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +9 -1
  26. package/build/shared/interfaces/resources/ILargeLanguageModel.js +10 -1
  27. package/build/shared/interfaces/restAPI/opsCenter/alerts/IIndexOpsCenterAlertsRest_2_0.js +3 -0
  28. package/build/shared/interfaces/restAPI/opsCenter/alerts/IOpsCenterAlert.js +3 -0
  29. package/build/shared/interfaces/restAPI/opsCenter/component/IOpsCenterComponents.js +34 -0
  30. package/build/shared/interfaces/restAPI/opsCenter/errors/IDeleteOpsCenterErrorRest_2_0.js +3 -0
  31. package/build/shared/interfaces/restAPI/opsCenter/errors/IGetOpsCenterErrorRest_2_0.js +3 -0
  32. package/build/shared/interfaces/restAPI/opsCenter/errors/IIndexOpsCenterErrorsRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/opsCenter/errors/IOpsCenterError.js +3 -0
  34. package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetrics.js +3 -0
  35. package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsConfigRest_2_0.js +3 -0
  36. package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRangeRest_2_0.js +3 -0
  37. package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRest_2_0.js +3 -0
  38. package/build/shared/interfaces/restAPI/opsCenter/metrics/IOpsCenterMetricsConfig.js +6 -0
  39. package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IGetOpsCenterObservationConfigRest_2_0.js +3 -0
  40. package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +4 -0
  41. package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IPatchOpsCenterObservationConfigRest_2_0.js +3 -0
  42. package/build/shared/interfaces/restAPI/opsCenter/observationConfig/ISetupOpsCenterObservationConfigRest_2_0.js +3 -0
  43. package/build/shared/interfaces/security/IIdentityProvider.js +1 -0
  44. package/dist/esm/RestAPIClient.js +7 -0
  45. package/dist/esm/apigroups/AIOpsCenterAPIGroup_2_0.js +18 -0
  46. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -8
  47. package/dist/esm/apigroups/index.js +1 -0
  48. package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +0 -44
  49. package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +0 -45
  50. package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +0 -43
  51. package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +0 -45
  52. package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +4 -1
  53. package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/mistralProviderConnection.js +8 -0
  54. package/dist/esm/shared/charts/descriptors/index.js +3 -1
  55. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +12 -3
  56. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +18 -8
  57. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +185 -63
  58. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +178 -0
  59. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +193 -0
  60. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +5 -1
  61. package/dist/esm/shared/charts/descriptors/service/index.js +2 -0
  62. package/dist/esm/shared/charts/descriptors/voice/mappers/hangup.mapper.js +9 -8
  63. package/dist/esm/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
  64. package/dist/esm/shared/charts/descriptors/voicegateway/nodes/hangup.js +1 -1
  65. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/hangup.js +24 -2
  66. package/dist/esm/shared/interfaces/amqpInterface.js +3 -0
  67. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +9 -1
  68. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +9 -0
  69. package/dist/esm/shared/interfaces/restAPI/opsCenter/alerts/IIndexOpsCenterAlertsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/opsCenter/alerts/IOpsCenterAlert.js +2 -0
  71. package/dist/esm/shared/interfaces/restAPI/opsCenter/component/IOpsCenterComponents.js +31 -0
  72. package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IDeleteOpsCenterErrorRest_2_0.js +2 -0
  73. package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IGetOpsCenterErrorRest_2_0.js +2 -0
  74. package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IIndexOpsCenterErrorsRest_2_0.js +2 -0
  75. package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IOpsCenterError.js +2 -0
  76. package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetrics.js +2 -0
  77. package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsConfigRest_2_0.js +2 -0
  78. package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRangeRest_2_0.js +2 -0
  79. package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRest_2_0.js +2 -0
  80. package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IOpsCenterMetricsConfig.js +5 -0
  81. package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IGetOpsCenterObservationConfigRest_2_0.js +2 -0
  82. package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +3 -0
  83. package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IPatchOpsCenterObservationConfigRest_2_0.js +2 -0
  84. package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/ISetupOpsCenterObservationConfigRest_2_0.js +2 -0
  85. package/dist/esm/shared/interfaces/security/IIdentityProvider.js +1 -0
  86. package/package.json +1 -1
  87. package/types/index.d.ts +309 -3
@@ -30,7 +30,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
30
30
  collapsable: true,
31
31
  placement: {
32
32
  children: {
33
- whitelist: ["aiAgentJobDefault", "aiAgentJobTool"],
33
+ whitelist: ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"],
34
34
  },
35
35
  },
36
36
  },
@@ -835,7 +835,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
835
835
  ],
836
836
  tags: ["ai", "aiAgent"],
837
837
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
838
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12;
838
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19;
839
839
  const { api, context, input, profile, flowReferenceId } = cognigy;
840
840
  const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
841
841
  try {
@@ -875,7 +875,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
875
875
  const voiceSettings = (0, setSessionConfig_mapper_2.voiceConfigParamsToVoiceSettings)(config, api);
876
876
  const payload = setSessionConfig_mapper_1.setSessionConfig.handleVGInput(voiceSettings, sessionParams || {}, api);
877
877
  if (payload && Object.keys(payload).length > 0) {
878
- api.say(null, {
878
+ (_b = api.say) === null || _b === void 0 ? void 0 : _b.call(api, null, {
879
879
  _cognigy: payload,
880
880
  });
881
881
  }
@@ -885,16 +885,16 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
885
885
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
886
886
  }
887
887
  }
888
- const _13 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _13, cleanedProfile = __rest(_13, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
888
+ const _20 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _20, cleanedProfile = __rest(_20, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
889
889
  const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
890
890
  /**
891
891
  * ----- Knowledge Search Section -----
892
892
  */
893
893
  let knowledgeSearchResponseData;
894
894
  const sessionState = await api.loadSessionState();
895
- const lastToolCall = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.toolCall;
895
+ const lastToolCall = (_c = sessionState.lastToolCall) === null || _c === void 0 ? void 0 : _c.toolCall;
896
896
  if (knowledgeSearchBehavior === "always" ||
897
- (knowledgeSearchBehavior === "onDemand" && ((_c = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _c === void 0 ? void 0 : _c.name) === "retrieve_knowledge")) {
897
+ (knowledgeSearchBehavior === "onDemand" && ((_d = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _d === void 0 ? void 0 : _d.name) === "retrieve_knowledge")) {
898
898
  const knowledgeStoreIds = [];
899
899
  if (knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) {
900
900
  knowledgeStoreIds.push(aiAgent.knowledgeReferenceId);
@@ -902,8 +902,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
902
902
  if (knowledgeSearchJobKnowledge && knowledgeSearchJobStore) {
903
903
  knowledgeStoreIds.push(knowledgeSearchJobStore);
904
904
  }
905
- if (knowledgeStoreIds.length > 0 && (input.text || ((_e = (_d = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _d === void 0 ? void 0 : _d.arguments) === null || _e === void 0 ? void 0 : _e.generated_prompt))) {
906
- let query = ((_g = (_f = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _f === void 0 ? void 0 : _f.arguments) === null || _g === void 0 ? void 0 : _g.generated_prompt) || input.text;
905
+ if (knowledgeStoreIds.length > 0 && (input.text || ((_f = (_e = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _e === void 0 ? void 0 : _e.arguments) === null || _f === void 0 ? void 0 : _f.generated_prompt))) {
906
+ let query = ((_h = (_g = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_prompt) || input.text;
907
907
  if (knowledgeSearchBehavior === "always" && knowledgeSearchGenerateSearchPrompt) {
908
908
  const generatedSearchPrompt = await (0, generateSearchPrompt_1.generateSearchPrompt)({ api, userMemory, llmProviderReferenceId, debugLogTokenCount, memoryContextInjection });
909
909
  if (generatedSearchPrompt === null || generatedSearchPrompt === void 0 ? void 0 : generatedSearchPrompt.generated_prompt) {
@@ -919,10 +919,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
919
919
  knowledgeStoreIds,
920
920
  };
921
921
  if (knowledgeSearchBehavior === "onDemand") {
922
- const generated_buffer_phrase = (_j = (_h = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _h === void 0 ? void 0 : _h.arguments) === null || _j === void 0 ? void 0 : _j.generated_buffer_phrase;
922
+ const generated_buffer_phrase = (_k = (_j = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _j === void 0 ? void 0 : _j.arguments) === null || _k === void 0 ? void 0 : _k.generated_buffer_phrase;
923
923
  if (generated_buffer_phrase) {
924
924
  // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
925
- await ((_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, generated_buffer_phrase, {
925
+ await ((_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, generated_buffer_phrase, {
926
926
  _cognigy: {
927
927
  _preventTranscript: true
928
928
  }
@@ -952,7 +952,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
952
952
  if (query) {
953
953
  messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
954
954
  }
955
- if ((_l = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _l === void 0 ? void 0 : _l.length) {
955
+ if ((_m = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _m === void 0 ? void 0 : _m.length) {
956
956
  knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
957
957
  var _a;
958
958
  messageLines.push(`\nTop ${index + 1}:`);
@@ -964,7 +964,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
964
964
  else {
965
965
  messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
966
966
  }
967
- (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
967
+ (_o = api.logDebugMessage) === null || _o === void 0 ? void 0 : _o.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
968
968
  }
969
969
  // Knowledge Search "onDemand" responds to a tool call
970
970
  if (knowledgeSearchBehavior === "onDemand") {
@@ -993,7 +993,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
993
993
  await api.addTranscriptStep(toolAnswer);
994
994
  }
995
995
  }
996
- if (((_o = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _o === void 0 ? void 0 : _o.name) === "retrieve_knowledge") {
996
+ if (((_p = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _p === void 0 ? void 0 : _p.name) === "retrieve_knowledge") {
997
997
  // remove the retrieve_knowledge toolCall from session state to avoid infinite loops
998
998
  api.updateSessionStateValues({
999
999
  lastToolCall: undefined
@@ -1005,32 +1005,39 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1005
1005
  // create the system Message from the AI Agent resource and this Node's config storage
1006
1006
  const systemMessage = (0, createSystemMessage_1.createSystemMessage)(aiAgent, input, jobName, jobDescription, jobInstructions, userMemory, memoryContextInjection, isOnDemandKnowledgeStoreConfigured ? "onDemand" : "none");
1007
1007
  // Create Tools JSON
1008
+ /** This is the list of tools that are used in the AI Agent Job */
1008
1009
  const tools = [];
1010
+ /** Array of tool IDs for deduping */
1009
1011
  const toolIds = [];
1010
- childConfigs.forEach(child => {
1012
+ /** Map of MCP tool IDs to their respective node IDs they were loaded from */
1013
+ const toolMap = new Map();
1014
+ /** Array of tool names for listing in the debug message */
1015
+ const toolNames = [];
1016
+ for (const child of childConfigs) {
1011
1017
  if (child.type === "aiAgentJobDefault") {
1012
- return;
1018
+ continue;
1013
1019
  }
1014
- ;
1015
1020
  const toolId = child.config.toolId;
1016
- if (!toolId) {
1017
- throw new Error(`Tool ID is missing in Tool Node configuration.`);
1018
- }
1019
- const parsedToolId = api.parseCognigyScriptText(toolId);
1020
- if (!(0, createSystemMessage_1.validateToolId)(parsedToolId)) {
1021
- throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
1022
- }
1023
- if (toolIds.includes(parsedToolId)) {
1024
- throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
1025
- }
1026
- toolIds.push(parsedToolId);
1027
- if (child.type === "aiAgentJobTool" && (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
1021
+ if (child.type === "aiAgentJobTool" &&
1022
+ (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
1023
+ if (!toolId) {
1024
+ throw new Error(`Tool ID is missing in Tool Node configuration.`);
1025
+ }
1026
+ const parsedToolId = api.parseCognigyScriptText(toolId);
1027
+ if (!(0, createSystemMessage_1.validateToolId)(parsedToolId)) {
1028
+ throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
1029
+ }
1030
+ if (toolIds.includes(parsedToolId)) {
1031
+ throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
1032
+ }
1033
+ toolIds.push(parsedToolId);
1034
+ toolNames.push(parsedToolId);
1028
1035
  const tool = {
1029
1036
  type: "function",
1030
1037
  function: {
1031
1038
  name: parsedToolId,
1032
1039
  description: api.parseCognigyScriptText(child.config.description),
1033
- }
1040
+ },
1034
1041
  };
1035
1042
  if (useStrict) {
1036
1043
  tool.function.strict = true;
@@ -1040,7 +1047,106 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1040
1047
  }
1041
1048
  tools.push(tool);
1042
1049
  }
1043
- });
1050
+ if (child.type === "aiAgentJobMCPTool" &&
1051
+ (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
1052
+ if (!child.config.mcpServerUrl) {
1053
+ throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
1054
+ }
1055
+ const mcpServerUrl = child.config.mcpServerUrl;
1056
+ const timeout = child.config.timeout;
1057
+ const cacheTools = child.config.cacheTools;
1058
+ const sendDebug = child.config.debugMessageFetchedTools;
1059
+ const toolFilter = child.config.toolFilter;
1060
+ let mcpTools = null;
1061
+ try {
1062
+ mcpTools = await api.fetchMcpTools({
1063
+ mcpServerUrl,
1064
+ timeout,
1065
+ cacheTools,
1066
+ });
1067
+ }
1068
+ catch (error) {
1069
+ const errorDetails = error instanceof Error
1070
+ ? {
1071
+ name: error.name,
1072
+ message: error.message,
1073
+ }
1074
+ : error;
1075
+ (_q = api.logDebugError) === null || _q === void 0 ? void 0 : _q.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
1076
+ }
1077
+ if (mcpTools) {
1078
+ if (sendDebug) {
1079
+ if (mcpTools.length === 0) {
1080
+ (_r = api.logDebugMessage) === null || _r === void 0 ? void 0 : _r.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
1081
+ }
1082
+ if (mcpTools.length > 0) {
1083
+ const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
1084
+ mcpTools.forEach((tool) => {
1085
+ messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
1086
+ if (child.config.debugMessageParameters && tool.inputSchema) {
1087
+ messageLines.push(` <b>Parameters</b>:`);
1088
+ Object.keys(tool.inputSchema.properties).forEach((key) => {
1089
+ const parameter = tool.inputSchema.properties[key];
1090
+ const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
1091
+ if (parameter.description) {
1092
+ messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
1093
+ }
1094
+ else {
1095
+ messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
1096
+ }
1097
+ });
1098
+ }
1099
+ });
1100
+ (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "MCP Tool");
1101
+ }
1102
+ }
1103
+ const filteredMcpTools = mcpTools.filter((tool) => {
1104
+ if (toolFilter && toolFilter !== "none") {
1105
+ if (toolFilter === "whitelist" && child.config.whitelist) {
1106
+ const whitelist = child.config.whitelist.map((item) => item.trim());
1107
+ return whitelist.includes(tool.name);
1108
+ }
1109
+ else if (toolFilter === "blacklist") {
1110
+ // If the blacklist is falsy, all tools are allowed
1111
+ if (!child.config.blacklist) {
1112
+ return true;
1113
+ }
1114
+ const blacklist = child.config.blacklist.map((item) => item.trim());
1115
+ return !blacklist.includes(tool.name);
1116
+ }
1117
+ }
1118
+ else {
1119
+ return true;
1120
+ }
1121
+ });
1122
+ const structuredMcpTools = [];
1123
+ filteredMcpTools.forEach((tool) => {
1124
+ var _a;
1125
+ if (toolIds.includes(tool.name)) {
1126
+ (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
1127
+ return;
1128
+ }
1129
+ // add tool to the list of tool ids to prevent duplicates
1130
+ toolIds.push(tool.name);
1131
+ toolNames.push(`${tool.name} (${child.config.name})`);
1132
+ toolMap.set(tool.name, child.id);
1133
+ const structuredTool = {
1134
+ type: "function",
1135
+ function: {
1136
+ name: tool.name,
1137
+ description: tool.description,
1138
+ },
1139
+ };
1140
+ if (tool.inputSchema) {
1141
+ structuredTool.function.parameters = tool.inputSchema;
1142
+ }
1143
+ structuredMcpTools.push(structuredTool);
1144
+ });
1145
+ tools.push(...structuredMcpTools);
1146
+ }
1147
+ }
1148
+ }
1149
+ ;
1044
1150
  // Optional Debug Message with the config
1045
1151
  if (debugConfig) {
1046
1152
  const messageLines = [];
@@ -1048,14 +1154,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1048
1154
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
1049
1155
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
1050
1156
  // Safety settings
1051
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_p = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _p === void 0 ? void 0 : _p.avoidHarmfulContent}`);
1052
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_q = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _q === void 0 ? void 0 : _q.avoidUngroundedContent}`);
1053
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_r = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _r === void 0 ? void 0 : _r.avoidCopyrightInfringements}`);
1054
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_s = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _s === void 0 ? void 0 : _s.preventJailbreakAndManipulation}`);
1157
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_t = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _t === void 0 ? void 0 : _t.avoidHarmfulContent}`);
1158
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidUngroundedContent}`);
1159
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidCopyrightInfringements}`);
1160
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.preventJailbreakAndManipulation}`);
1055
1161
  // Tools
1056
- if (tools.length > 0) {
1162
+ if (toolNames.length > 0) {
1057
1163
  messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
1058
- tools.forEach(tool => messageLines.push(`- ${tool.function.name}`));
1164
+ toolNames.forEach(toolName => messageLines.push(`- ${toolName}`));
1059
1165
  }
1060
1166
  // Memory
1061
1167
  if (memoryType !== "none") {
@@ -1107,7 +1213,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1107
1213
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1108
1214
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1109
1215
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1110
- (_t = api.logDebugMessage) === null || _t === void 0 ? void 0 : _t.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1216
+ (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1111
1217
  }
1112
1218
  // keep this after the debug message since the "retrieve_knowledge" tool is implicit
1113
1219
  // we only add this tool if at least one knowledge source is enabled
@@ -1151,14 +1257,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1151
1257
  transcript.length > 0 &&
1152
1258
  transcript[transcript.length - 1].role === transcripts_1.TranscriptRole.USER) {
1153
1259
  const userInput = transcript[transcript.length - 1];
1154
- const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_u = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _u === void 0 ? void 0 : _u.text) || input.text}`;
1260
+ const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_y = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _y === void 0 ? void 0 : _y.text) || input.text}`;
1155
1261
  transcript[transcript.length - 1].payload.text = enhancedInput;
1156
1262
  }
1157
1263
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
1158
1264
  const _messageId = (0, crypto_1.randomUUID)();
1159
1265
  const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
1160
1266
  // Temp fix to override the transcript if needed
1161
- transcript: ((_v = context === null || context === void 0 ? void 0 : context._cognigy) === null || _v === void 0 ? void 0 : _v.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
1267
+ transcript: ((_z = context === null || context === void 0 ? void 0 : context._cognigy) === null || _z === void 0 ? void 0 : _z.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
1162
1268
  var _a;
1163
1269
  text = isStreamingChannel ? text : text.trim();
1164
1270
  if (text) {
@@ -1182,45 +1288,51 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1182
1288
  };
1183
1289
  }
1184
1290
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
1185
- (_w = api.setAnalyticsData) === null || _w === void 0 ? void 0 : _w.call(api, "understood", "true");
1291
+ (_0 = api.setAnalyticsData) === null || _0 === void 0 ? void 0 : _0.call(api, "understood", "true");
1186
1292
  input.understood = true;
1187
- const fullLlmResult = await ((_x = api.runGenerativeAIPrompt) === null || _x === void 0 ? void 0 : _x.call(api, llmPromptOptions, "aiAgent"));
1293
+ const fullLlmResult = await ((_1 = api.runGenerativeAIPrompt) === null || _1 === void 0 ? void 0 : _1.call(api, llmPromptOptions, "aiAgent"));
1188
1294
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
1189
1295
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
1190
1296
  const tokenUsage = fullLlmResult.tokenUsage;
1191
1297
  // Send optional debug message with token usage
1192
1298
  if (debugLogTokenCount && tokenUsage) {
1193
- (_y = api.logDebugMessage) === null || _y === void 0 ? void 0 : _y.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1299
+ (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1194
1300
  }
1195
1301
  // Identify if the result is a tool call
1196
1302
  // If response is a tool call, set next node for Tools
1197
1303
  if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
1198
1304
  const mainToolCall = llmResult.toolCalls[0];
1305
+ let isMcpToolCall = false;
1199
1306
  // Find the child node with the toolId of the tool call
1200
- const toolChild = childConfigs.find(child => { var _a, _b; return child.type === "aiAgentJobTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
1307
+ let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "aiAgentJobTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
1308
+ if (!toolChild && toolMap.has(mainToolCall.function.name)) {
1309
+ // If the tool call is from an MCP tool, set the next node to the corresponding child node
1310
+ toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
1311
+ isMcpToolCall = true;
1312
+ }
1201
1313
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
1202
- (_z = api.logDebugError) === null || _z === void 0 ? void 0 : _z.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1314
+ (_3 = api.logDebugError) === null || _3 === void 0 ? void 0 : _3.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1203
1315
  }
1204
1316
  // Add last tool call to session state for loading it from Tool Answer Node
1205
1317
  api.updateSessionStateValues({
1206
- lastToolCall: {
1207
- llmProvider,
1208
- aiAgentJobNode: {
1318
+ lastToolCall: Object.assign(Object.assign({ llmProvider, aiAgentJobNode: {
1209
1319
  flow: flowReferenceId,
1210
- node: nodeId
1211
- },
1212
- toolCall: mainToolCall,
1213
- }
1320
+ node: nodeId,
1321
+ } }, (isMcpToolCall && {
1322
+ mcpServerUrl: (_4 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _4 === void 0 ? void 0 : _4.mcpServerUrl,
1323
+ timeout: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.timeout,
1324
+ mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
1325
+ })), { toolCall: mainToolCall }),
1214
1326
  });
1215
1327
  // if there are any parameters/arguments, add them to the input slots
1216
1328
  if (mainToolCall.function.arguments) {
1217
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_1 = (_0 = input.aiAgent) === null || _0 === void 0 ? void 0 : _0.toolArgs) !== null && _1 !== void 0 ? _1 : {}), mainToolCall.function.arguments) });
1329
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_7 = (_6 = input.aiAgent) === null || _6 === void 0 ? void 0 : _6.toolArgs) !== null && _7 !== void 0 ? _7 : {}), mainToolCall.function.arguments) });
1218
1330
  }
1219
1331
  // Debug Message for Tool Calls, configured in the Tool Node
1220
- if ((_2 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _2 === void 0 ? void 0 : _2.debugMessage) {
1332
+ if ((_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.debugMessage) {
1221
1333
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
1222
1334
  // Arguments / Parameters Slots
1223
- const slots = ((_3 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _3 === void 0 ? void 0 : _3.arguments) && Object.keys(mainToolCall.function.arguments);
1335
+ const slots = ((_9 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _9 === void 0 ? void 0 : _9.arguments) && Object.keys(mainToolCall.function.arguments);
1224
1336
  const hasSlots = slots && slots.length > 0;
1225
1337
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1226
1338
  if (hasSlots) {
@@ -1235,7 +1347,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1235
1347
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1236
1348
  });
1237
1349
  }
1238
- (_4 = api.logDebugMessage) === null || _4 === void 0 ? void 0 : _4.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1350
+ (_10 = api.logDebugMessage) === null || _10 === void 0 ? void 0 : _10.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1239
1351
  }
1240
1352
  if (toolChild) {
1241
1353
  api.setNextNode(toolChild.id);
@@ -1260,7 +1372,17 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1260
1372
  }
1261
1373
  // Optionally output the result immediately
1262
1374
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1263
- await ((_5 = api.output) === null || _5 === void 0 ? void 0 : _5.call(api, llmResult.result, {}));
1375
+ await ((_11 = api.output) === null || _11 === void 0 ? void 0 : _11.call(api, llmResult.result, {}));
1376
+ }
1377
+ else if (llmResult.finishReason && llmPromptOptions.stream) {
1378
+ // send the finishReason as last output for a stream
1379
+ (_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, "", {
1380
+ _cognigy: {
1381
+ _preventTranscript: true,
1382
+ _messageId,
1383
+ _finishReason: llmResult.finishReason,
1384
+ }
1385
+ });
1264
1386
  }
1265
1387
  // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
1266
1388
  if (llmResult.result && llmPromptOptions.stream) {
@@ -1277,7 +1399,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1277
1399
  }
1278
1400
  // Add response to Cognigy Input/Context for further usage
1279
1401
  if (storeLocation === "context") {
1280
- (_6 = api.addToContext) === null || _6 === void 0 ? void 0 : _6.call(api, contextKey, llmResult, "simple");
1402
+ (_13 = api.addToContext) === null || _13 === void 0 ? void 0 : _13.call(api, contextKey, llmResult, "simple");
1281
1403
  }
1282
1404
  else if (storeLocation === "input") {
1283
1405
  api.addToInput(inputKey, llmResult);
@@ -1290,14 +1412,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1290
1412
  const errorDetails = {
1291
1413
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1292
1414
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1293
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_7 = error.originalErrorDetails) === null || _7 === void 0 ? void 0 : _7.message),
1415
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_14 = error.originalErrorDetails) === null || _14 === void 0 ? void 0 : _14.message),
1294
1416
  };
1295
- (_8 = api.emitEvent) === null || _8 === void 0 ? void 0 : _8.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1417
+ (_15 = api.emitEvent) === null || _15 === void 0 ? void 0 : _15.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1296
1418
  if (logErrorToSystem) {
1297
- (_9 = api.log) === null || _9 === void 0 ? void 0 : _9.call(api, "error", JSON.stringify(errorDetails));
1419
+ (_16 = api.log) === null || _16 === void 0 ? void 0 : _16.call(api, "error", JSON.stringify(errorDetails));
1298
1420
  }
1299
1421
  if (errorHandling !== "stop") {
1300
- (_10 = api.logDebugError) === null || _10 === void 0 ? void 0 : _10.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1422
+ (_17 = api.logDebugError) === null || _17 === void 0 ? void 0 : _17.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1301
1423
  }
1302
1424
  if (storeErrorInInput) {
1303
1425
  input.aiAgent = input.aiAgent || {};
@@ -1306,7 +1428,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1306
1428
  if (errorHandling === "continue") {
1307
1429
  // output the timeout message
1308
1430
  if (errorMessage) {
1309
- await ((_11 = api.output) === null || _11 === void 0 ? void 0 : _11.call(api, errorMessage, null));
1431
+ await ((_18 = api.output) === null || _18 === void 0 ? void 0 : _18.call(api, errorMessage, null));
1310
1432
  }
1311
1433
  // Set default node as next node
1312
1434
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1318,7 +1440,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1318
1440
  if (!errorHandlingGotoTarget) {
1319
1441
  throw new Error("GoTo Target is required");
1320
1442
  }
1321
- if (!((_12 = api.checkThink) === null || _12 === void 0 ? void 0 : _12.call(api, nodeId))) {
1443
+ if (!((_19 = api.checkThink) === null || _19 === void 0 ? void 0 : _19.call(api, nodeId))) {
1322
1444
  api.resetNextNodes();
1323
1445
  await api.executeFlow({
1324
1446
  flowNode: {