@cognigy/rest-api-client 4.100.0 → 2025.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +10 -0
  2. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/awsBedrockIAMProviderConnection.js +11 -0
  3. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +5 -1
  4. package/build/shared/charts/descriptors/message/question/question.js +12 -32
  5. package/build/shared/charts/descriptors/message/say.js +10 -7
  6. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +90 -64
  7. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +2 -2
  8. package/build/shared/charts/descriptors/service/aiAgent/helpers/createSystemMessage.js +22 -8
  9. package/build/shared/charts/descriptors/service/handoverV2.js +0 -6
  10. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +7 -4
  11. package/build/shared/constants.js +0 -1
  12. package/build/shared/interfaces/IOrganisation.js +32 -8
  13. package/build/shared/interfaces/handover.js +21 -5
  14. package/build/shared/interfaces/messageAPI/endpoints.js +12 -2
  15. package/build/shared/interfaces/messageAPI/handover.js +34 -6
  16. package/build/shared/interfaces/resources/IEndpoint.js +1 -0
  17. package/build/shared/interfaces/resources/IWebrtcWidgetConfig.js +1 -0
  18. package/build/shared/interfaces/resources/TWebhookChannelType.js +5 -0
  19. package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/awsBedrockIAMProviderConnection.js +8 -0
  20. package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +3 -0
  21. package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -32
  22. package/dist/esm/shared/charts/descriptors/message/say.js +10 -7
  23. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +91 -65
  24. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +2 -2
  25. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createSystemMessage.js +20 -7
  26. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +0 -6
  27. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +7 -4
  28. package/dist/esm/shared/constants.js +0 -1
  29. package/dist/esm/shared/interfaces/IOrganisation.js +32 -8
  30. package/dist/esm/shared/interfaces/handover.js +21 -5
  31. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +11 -1
  32. package/dist/esm/shared/interfaces/messageAPI/handover.js +34 -6
  33. package/dist/esm/shared/interfaces/resources/IEndpoint.js +1 -0
  34. package/dist/esm/shared/interfaces/resources/IWebrtcWidgetConfig.js +1 -0
  35. package/dist/esm/shared/interfaces/resources/TWebhookChannelType.js +5 -0
  36. package/package.json +1 -1
  37. package/types/index.d.ts +30 -12
@@ -740,27 +740,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
740
740
  },
741
741
  defaultValue: true,
742
742
  },
743
- process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && {
744
- key: COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE,
745
- type: "cognigyText",
746
- label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE__LABEL",
747
- description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE__DESCRIPTION",
748
- condition: {
749
- key: "escalateAnswersAction",
750
- value: "handover"
751
- }
752
- },
753
- {
754
- key: "escalateAnswersRepeatHandoverMessage",
755
- type: "toggle",
756
- label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__LABEL",
757
- description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__DESCRIPTION",
758
- defaultValue: false,
759
- condition: {
760
- key: "escalateAnswersAction",
761
- value: "handover"
762
- }
763
- },
764
743
  {
765
744
  key: "escalateAnswersHandoverCancelIntent",
766
745
  type: "cognigyText",
@@ -1716,7 +1695,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1716
1695
  "escalateAnswersHandoverSendResolveEvent",
1717
1696
  "escalateAnswersHandoverAdditionalCategoryIds",
1718
1697
  process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_HANDOVER_LIVE_AGENT_INBOX_ID,
1719
- process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE,
1720
1698
  process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_ALLOW_AGENT_INJECT,
1721
1699
  "escalateAnswersHandoverChatwootInboxId",
1722
1700
  "escalateAnswersHandoverSendTranscriptAsFirstMessage",
@@ -1838,7 +1816,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1838
1816
  //#endregion DescriptorFields
1839
1817
  function: ({ cognigy, nodeId, organisationId, config, inputOptions }) => __awaiter(void 0, void 0, void 0, function* () {
1840
1818
  var _a, _b, _c;
1841
- const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, preventTranscript, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, 
escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
1819
+ const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, preventTranscript, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, 
escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
1842
1820
  const { input, context, profile, api } = cognigy;
1843
1821
  const rephraseWithAIParams = {
1844
1822
  generativeAI_rephraseOutputMode: config.generativeAI_rephraseOutputMode,
@@ -1981,11 +1959,11 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1981
1959
  }
1982
1960
  // output reconfirmation reprompt
1983
1961
  if (reconfirmationQuestionReprompt) {
1984
- yield SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: { handoverOutput, preventTranscript, say: { type: "text", text: [reconfirmationQuestionReprompt] } } });
1962
+ yield SAY.function({ nodeType: "question.reconfirmation.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: { handoverOutput, preventTranscript, say: { type: "text", text: [reconfirmationQuestionReprompt] } } });
1985
1963
  }
1986
1964
  // output reconfirmationQuestion
1987
1965
  const tentativeAnswerShortform = (activeQuestion === null || activeQuestion === void 0 ? void 0 : activeQuestion.tentativeAnswerShortform) || "";
1988
- yield SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: { handoverOutput, preventTranscript, say: { type: "text", text: [reconfirmationQuestion.replace("[ANSWER]", tentativeAnswerShortform)] } } });
1966
+ yield SAY.function({ nodeType: "question.reconfirmation", cognigy, childConfigs: [], nodeId, organisationId, config: { handoverOutput, preventTranscript, say: { type: "text", text: [reconfirmationQuestion.replace("[ANSWER]", tentativeAnswerShortform)] } } });
1989
1967
  // set myself as next node and stop
1990
1968
  api.setNextNode(nodeId);
1991
1969
  api.stopExecution();
@@ -2051,7 +2029,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2051
2029
  if (isFirstExecution && !parseResultOnEntry) {
2052
2030
  rephraseWithAIParams.promptType = "question";
2053
2031
  rephraseWithAIParams.questionType = config.type;
2054
- yield SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, say }, rephraseWithAIParams) });
2032
+ yield SAY.function({ nodeType: "question.initial", cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, say }, rephraseWithAIParams) });
2055
2033
  if (config.type === "date" && !config.datepicker_hidePicker) {
2056
2034
  showDatePicker(cognigy, config);
2057
2035
  }
@@ -2089,7 +2067,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2089
2067
  // remember a shortform of the given answer in the activeQuestion object
2090
2068
  activeQuestion.tentativeAnswerShortform = generateAnswerShortForm(type, result, cognigy.input.text);
2091
2069
  // Output reconfirmationQuestion
2092
- yield SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: { handoverOutput, preventTranscript, say: { type: "text", text: [reconfirmationQuestion.replace("[ANSWER]", activeQuestion.tentativeAnswerShortform)] } } });
2070
+ yield SAY.function({ nodeType: "question.reconfirmation", cognigy, childConfigs: [], nodeId, organisationId, config: { handoverOutput, preventTranscript, say: { type: "text", text: [reconfirmationQuestion.replace("[ANSWER]", activeQuestion.tentativeAnswerShortform)] } } });
2093
2071
  // remember that we are in reconfirmation mode and stop
2094
2072
  api.setSystemContext("reconfirmationQuestionInProgress", true);
2095
2073
  // we need to store the input, to be able to restore it afterwards
@@ -2210,6 +2188,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2210
2188
  return;
2211
2189
  case "text":
2212
2190
  yield SAY.function({
2191
+ nodeType: "question.escalation",
2213
2192
  cognigy,
2214
2193
  childConfigs: [],
2215
2194
  nodeId,
@@ -2255,7 +2234,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2255
2234
  eightByEightJSONProps: escalateIntentsHandoverEightByEightJSONProps || [],
2256
2235
  sendResolveEvent: escalateIntentsHandoverSendResolveEvent,
2257
2236
  resolveBehavior: escalateIntentsHandoverResolveBehavior,
2258
- agentAssistInitMessage: escalateIntentsAgentAssistInitMessage,
2259
2237
  allowAgentInject: escalateIntentsAllowAgentInject,
2260
2238
  sendOnActiveEvent: escalateIntentsSendOnActiveEvent,
2261
2239
  sendOnQueueEvent: escalateIntentsSendOnQueueEvent,
@@ -2359,6 +2337,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2359
2337
  return;
2360
2338
  case "text":
2361
2339
  yield SAY.function({
2340
+ nodeType: "question.escalation",
2362
2341
  cognigy,
2363
2342
  childConfigs: [],
2364
2343
  nodeId,
@@ -2404,7 +2383,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2404
2383
  eightByEightJSONProps: escalateAnswersHandoverEightByEightJSONProps || [],
2405
2384
  sendResolveEvent: escalateAnswersHandoverSendResolveEvent,
2406
2385
  resolveBehavior: escalateAnswersHandoverResolveBehavior,
2407
- agentAssistInitMessage: escalateAnswersAgentAssistInitMessage,
2408
2386
  allowAgentInject: escalateAnswersAllowAgentInject,
2409
2387
  sendOnActiveEvent: escalateAnswersSendOnActiveEvent,
2410
2388
  sendOnQueueEvent: escalateAnswersSendOnQueueEvent,
@@ -2441,7 +2419,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2441
2419
  if (sayReprompt) {
2442
2420
  switch (repromptType) {
2443
2421
  case "say":
2444
- yield SAY.function(Object.assign({ cognigy, childConfigs: [], nodeId, organisationId, config: { preventTranscript, say: repromptSay } }, rephraseWithAIParams));
2422
+ yield SAY.function(Object.assign({ nodeType: "question.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: { preventTranscript, say: repromptSay } }, rephraseWithAIParams));
2445
2423
  break;
2446
2424
  case "execute":
2447
2425
  // if a question reprompt is set to execute flow and we have just executed
@@ -2490,11 +2468,11 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2490
2468
  }
2491
2469
  const repromptMessage = yield api.runGenerativeAIPrompt(data, "gptPromptNode");
2492
2470
  if (!repromptLLMStream) {
2493
- yield SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
2471
+ yield SAY.function({ nodeType: "question.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
2494
2472
  }
2495
2473
  break;
2496
2474
  default: // this is also "text"
2497
- yield SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
2475
+ yield SAY.function({ nodeType: "question.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
2498
2476
  }
2499
2477
  }
2500
2478
  /* If repeat toggle is on, also output question (and maybe datepicker) again */
@@ -2502,6 +2480,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2502
2480
  rephraseWithAIParams.promptType = "question";
2503
2481
  rephraseWithAIParams.questionType = config.type;
2504
2482
  yield SAY.function({
2483
+ nodeType: "question.repeat",
2505
2484
  cognigy,
2506
2485
  childConfigs: [],
2507
2486
  nodeId,
@@ -2518,6 +2497,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2518
2497
  rephraseWithAIParams.promptType = "question";
2519
2498
  rephraseWithAIParams.questionType = config.type;
2520
2499
  yield SAY.function({
2500
+ nodeType: "question.initial",
2521
2501
  cognigy,
2522
2502
  childConfigs: [],
2523
2503
  nodeId,
@@ -86,8 +86,8 @@ export const SAY = createNodeDescriptor({
86
86
  type: "sayNode",
87
87
  },
88
88
  tags: ["basic", "message"],
89
- function: ({ cognigy, config, nodeId, organisationId }) => __awaiter(void 0, void 0, void 0, function* () {
90
- var _a, _b, _c, _d, _e, _f;
89
+ function: ({ cognigy, config, nodeId, organisationId, nodeType }) => __awaiter(void 0, void 0, void 0, function* () {
90
+ var _a, _b, _c, _d, _e, _f, _g;
91
91
  const { api } = cognigy;
92
92
  const { text, loop, linear, type, _data } = config.say;
93
93
  /*Say nodes are always forwardable */
@@ -143,28 +143,31 @@ export const SAY = createNodeDescriptor({
143
143
  sayData._cognigy = sayData._cognigy || {};
144
144
  sayData._cognigy._preventTranscript = true;
145
145
  }
146
+ if (((_b = cognigy === null || cognigy === void 0 ? void 0 : cognigy.input) === null || _b === void 0 ? void 0 : _b.channel) === "voiceGateway2") {
147
+ sayData = Object.assign(Object.assign({}, sayData), { nodeType });
148
+ }
146
149
  outputText = yield rephraseSentenceWithAI(outputText, config, api, organisationId);
147
150
  yield api.say(outputText, sayData, settings);
148
151
  }
149
152
  else {
150
- const _cognigyDefault = (_b = _data === null || _data === void 0 ? void 0 : _data._cognigy) === null || _b === void 0 ? void 0 : _b._default;
153
+ const _cognigyDefault = (_c = _data === null || _data === void 0 ? void 0 : _data._cognigy) === null || _c === void 0 ? void 0 : _c._default;
151
154
  const typeKey = "_" + type;
152
155
  if (type === "quickReplies" || type === "buttons") {
153
- if (((_c = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _c === void 0 ? void 0 : _c.text) && (_cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey].fallbackText)) {
156
+ if (((_d = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _d === void 0 ? void 0 : _d.text) && (_cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey].fallbackText)) {
154
157
  const result = yield rephraseMultipleSentencesWithAI([_cognigyDefault[typeKey].text, _cognigyDefault[typeKey].fallbackText], config, api, organisationId);
155
158
  if (result.length === 2) {
156
159
  _cognigyDefault[typeKey].text = result[0];
157
160
  _cognigyDefault[typeKey].fallbackText = result[1];
158
161
  }
159
162
  }
160
- else if ((_d = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _d === void 0 ? void 0 : _d.text) {
163
+ else if ((_e = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _e === void 0 ? void 0 : _e.text) {
161
164
  _cognigyDefault[typeKey].text = yield rephraseSentenceWithAI(_cognigyDefault[typeKey].text, config, api, organisationId);
162
165
  }
163
- else if ((_e = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _e === void 0 ? void 0 : _e.fallbackText) {
166
+ else if ((_f = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _f === void 0 ? void 0 : _f.fallbackText) {
164
167
  _cognigyDefault[typeKey].fallbackText = yield rephraseSentenceWithAI(_cognigyDefault[typeKey].fallbackText, config, api, organisationId);
165
168
  }
166
169
  }
167
- else if ((_f = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _f === void 0 ? void 0 : _f.fallbackText) {
170
+ else if ((_g = _cognigyDefault === null || _cognigyDefault === void 0 ? void 0 : _cognigyDefault[typeKey]) === null || _g === void 0 ? void 0 : _g.fallbackText) {
168
171
  _cognigyDefault[typeKey].fallbackText = yield rephraseSentenceWithAI(_cognigyDefault[typeKey].fallbackText, config, api, organisationId);
169
172
  }
170
173
  const data = config.say;
@@ -5,10 +5,10 @@ import { randomUUID } from 'crypto';
5
5
  import { setSessionConfig } from "../../voice/mappers/setSessionConfig.mapper";
6
6
  import { voiceConfigParamsToVoiceSettings } from "../../voice/mappers/setSessionConfig.mapper";
7
7
  import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebugMode";
8
- import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
9
- import { createSystemMessage, validateToolId } from "./helpers/createSystemMessage";
8
+ import { createSystemMessage, validateToolId, getCognigyBrandMessage } from "./helpers/createSystemMessage";
10
9
  import { generateSearchPrompt } from "./helpers/generateSearchPrompt";
11
10
  import { getUserMemory } from "./helpers/getUserMemory";
11
+ import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
12
12
  export const AI_AGENT_JOB = createNodeDescriptor({
13
13
  type: "aiAgentJob",
14
14
  defaultLabel: "AI Agent",
@@ -470,6 +470,20 @@ export const AI_AGENT_JOB = createNodeDescriptor({
470
470
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOKEN_COUNT__DESCRIPTION",
471
471
  defaultValue: false
472
472
  },
473
+ {
474
+ key: "debugLogSystemPrompt",
475
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_SYSTEM_PROMPT__LABEL",
476
+ type: "toggle",
477
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_SYSTEM_PROMPT__DESCRIPTION",
478
+ defaultValue: false
479
+ },
480
+ {
481
+ key: "debugLogToolDefinitions",
482
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__LABEL",
483
+ type: "toggle",
484
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__DESCRIPTION",
485
+ defaultValue: false
486
+ },
473
487
  {
474
488
  key: "debugResult",
475
489
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_KNOWLEDGE_RESULTS__LABEL",
@@ -802,6 +816,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
802
816
  "debugConfig",
803
817
  "debugResult",
804
818
  "debugLogTokenCount",
819
+ "debugLogSystemPrompt",
820
+ "debugLogToolDefinitions"
805
821
  ],
806
822
  }
807
823
  ],
@@ -822,9 +838,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
822
838
  ],
823
839
  tags: ["ai", "aiAgent"],
824
840
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
825
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19;
841
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
826
842
  const { api, context, input, profile, flowReferenceId } = cognigy;
827
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
843
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
828
844
  try {
829
845
  if (!aiAgent) {
830
846
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -872,7 +888,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
872
888
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
873
889
  }
874
890
  }
875
- const _20 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _20, cleanedProfile = __rest(_20, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
891
+ const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
876
892
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
877
893
  /**
878
894
  * ----- Knowledge Search Section -----
@@ -991,6 +1007,12 @@ export const AI_AGENT_JOB = createNodeDescriptor({
991
1007
  const isOnDemandKnowledgeStoreConfigured = knowledgeSearchBehavior === "onDemand" && ((knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) || (knowledgeSearchJobKnowledge && knowledgeSearchJobStore));
992
1008
  // create the system Message from the AI Agent resource and this Node's config storage
993
1009
  const systemMessage = createSystemMessage(aiAgent, input, jobName, jobDescription, jobInstructions, userMemory, memoryContextInjection, isOnDemandKnowledgeStoreConfigured ? "onDemand" : "none");
1010
+ // Optional Debug Message for system prompt if enabled
1011
+ if (debugLogSystemPrompt && systemMessage.length > 0) {
1012
+ // Replace the Cognigy brand message in the logged prompt
1013
+ const debugSystemMessage = (_r = (_q = systemMessage[0]) === null || _q === void 0 ? void 0 : _q.content) === null || _r === void 0 ? void 0 : _r.replace(`${getCognigyBrandMessage()}\n`, "");
1014
+ (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, debugSystemMessage, "UI__DEBUG_MODE__AI_AGENT_JOB__SYSTEM_PROMPT__HEADER");
1015
+ }
994
1016
  // Create Tools JSON
995
1017
  /** This is the list of tools that are used in the AI Agent Job */
996
1018
  const tools = [];
@@ -1059,12 +1081,12 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1059
1081
  message: error.message,
1060
1082
  }
1061
1083
  : error;
1062
- (_q = api.logDebugError) === null || _q === void 0 ? void 0 : _q.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
1084
+ (_t = api.logDebugError) === null || _t === void 0 ? void 0 : _t.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
1063
1085
  }
1064
1086
  if (mcpTools) {
1065
1087
  if (sendDebug) {
1066
1088
  if (mcpTools.length === 0) {
1067
- (_r = api.logDebugMessage) === null || _r === void 0 ? void 0 : _r.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
1089
+ (_u = api.logDebugMessage) === null || _u === void 0 ? void 0 : _u.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
1068
1090
  }
1069
1091
  if (mcpTools.length > 0) {
1070
1092
  const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
@@ -1084,7 +1106,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1084
1106
  });
1085
1107
  }
1086
1108
  });
1087
- (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "MCP Tool");
1109
+ (_v = api.logDebugMessage) === null || _v === void 0 ? void 0 : _v.call(api, messageLines.join("\n"), "MCP Tool");
1088
1110
  }
1089
1111
  }
1090
1112
  const filteredMcpTools = mcpTools.filter((tool) => {
@@ -1134,6 +1156,39 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1134
1156
  }
1135
1157
  }
1136
1158
  ;
1159
+ // we only add this tool if at least one knowledge source is enabled
1160
+ if (isOnDemandKnowledgeStoreConfigured) {
1161
+ const knowledgeTool = {
1162
+ type: "function",
1163
+ function: {
1164
+ name: "retrieve_knowledge",
1165
+ description: "Find the answer to general prompts or questions searching the attached data sources. It focuses exclusively on a knowledge search and does not execute tasks like small talk, calculations, or script running.",
1166
+ parameters: {
1167
+ type: "object",
1168
+ properties: {
1169
+ generated_prompt: {
1170
+ type: "string",
1171
+ description: "Generated question including the context of the conversation (I want to know...)."
1172
+ },
1173
+ generated_buffer_phrase: {
1174
+ type: "string",
1175
+ description: "A generated delay or stalling phrase. Consider the context. Adapt to your speech style and language."
1176
+ },
1177
+ },
1178
+ required: ["generated_prompt", "generated_buffer_phrase"],
1179
+ additionalProperties: false
1180
+ }
1181
+ }
1182
+ };
1183
+ if (useStrict) {
1184
+ knowledgeTool.function.strict = true;
1185
+ }
1186
+ toolNames.push(knowledgeTool.function.name + " (internal)");
1187
+ tools.push(knowledgeTool);
1188
+ }
1189
+ if (debugLogToolDefinitions) {
1190
+ (_w = api.logDebugMessage) === null || _w === void 0 ? void 0 : _w.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
1191
+ }
1137
1192
  // Optional Debug Message with the config
1138
1193
  if (debugConfig) {
1139
1194
  const messageLines = [];
@@ -1141,10 +1196,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1141
1196
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
1142
1197
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
1143
1198
  // Safety settings
1144
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_t = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _t === void 0 ? void 0 : _t.avoidHarmfulContent}`);
1145
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidUngroundedContent}`);
1146
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidCopyrightInfringements}`);
1147
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.preventJailbreakAndManipulation}`);
1199
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_x = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _x === void 0 ? void 0 : _x.avoidHarmfulContent}`);
1200
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_y = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _y === void 0 ? void 0 : _y.avoidUngroundedContent}`);
1201
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_z = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _z === void 0 ? void 0 : _z.avoidCopyrightInfringements}`);
1202
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_0 = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _0 === void 0 ? void 0 : _0.preventJailbreakAndManipulation}`);
1148
1203
  // Tools
1149
1204
  if (toolNames.length > 0) {
1150
1205
  messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
@@ -1200,37 +1255,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1200
1255
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1201
1256
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1202
1257
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1203
- (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1204
- }
1205
- // keep this after the debug message since the "retrieve_knowledge" tool is implicit
1206
- // we only add this tool if at least one knowledge source is enabled
1207
- if (isOnDemandKnowledgeStoreConfigured) {
1208
- const knowledgeTool = {
1209
- type: "function",
1210
- function: {
1211
- name: "retrieve_knowledge",
1212
- description: "Find the answer to general prompts or questions searching the attached data sources. It focuses exclusively on a knowledge search and does not execute tasks like small talk, calculations, or script running.",
1213
- parameters: {
1214
- type: "object",
1215
- properties: {
1216
- generated_prompt: {
1217
- type: "string",
1218
- description: "Generated question including the context of the conversation (I want to know...)."
1219
- },
1220
- generated_buffer_phrase: {
1221
- type: "string",
1222
- description: "A generated delay or stalling phrase. Consider the context. Adapt to your speech style and language."
1223
- },
1224
- },
1225
- required: ["generated_prompt", "generated_buffer_phrase"],
1226
- additionalProperties: false
1227
- }
1228
- }
1229
- };
1230
- if (useStrict) {
1231
- knowledgeTool.function.strict = true;
1232
- }
1233
- tools.push(knowledgeTool);
1258
+ (_1 = api.logDebugMessage) === null || _1 === void 0 ? void 0 : _1.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1234
1259
  }
1235
1260
  const transcript = yield api.getTranscript({
1236
1261
  limit: 50,
@@ -1244,14 +1269,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1244
1269
  transcript.length > 0 &&
1245
1270
  transcript[transcript.length - 1].role === TranscriptRole.USER) {
1246
1271
  const userInput = transcript[transcript.length - 1];
1247
- const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_y = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _y === void 0 ? void 0 : _y.text) || input.text}`;
1272
+ const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_2 = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _2 === void 0 ? void 0 : _2.text) || input.text}`;
1248
1273
  transcript[transcript.length - 1].payload.text = enhancedInput;
1249
1274
  }
1250
1275
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
1251
1276
  const _messageId = randomUUID();
1252
1277
  const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
1253
1278
  // Temp fix to override the transcript if needed
1254
- transcript: ((_z = context === null || context === void 0 ? void 0 : context._cognigy) === null || _z === void 0 ? void 0 : _z.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
1279
+ transcript: ((_3 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _3 === void 0 ? void 0 : _3.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
1255
1280
  var _a;
1256
1281
  text = isStreamingChannel ? text : text.trim();
1257
1282
  if (text) {
@@ -1275,15 +1300,15 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1275
1300
  };
1276
1301
  }
1277
1302
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
1278
- (_0 = api.setAnalyticsData) === null || _0 === void 0 ? void 0 : _0.call(api, "understood", "true");
1303
+ (_4 = api.setAnalyticsData) === null || _4 === void 0 ? void 0 : _4.call(api, "understood", "true");
1279
1304
  input.understood = true;
1280
- const fullLlmResult = yield ((_1 = api.runGenerativeAIPrompt) === null || _1 === void 0 ? void 0 : _1.call(api, llmPromptOptions, "aiAgent"));
1305
+ const fullLlmResult = yield ((_5 = api.runGenerativeAIPrompt) === null || _5 === void 0 ? void 0 : _5.call(api, llmPromptOptions, "aiAgent"));
1281
1306
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
1282
1307
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
1283
1308
  const tokenUsage = fullLlmResult.tokenUsage;
1284
1309
  // Send optional debug message with token usage
1285
1310
  if (debugLogTokenCount && tokenUsage) {
1286
- (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1311
+ (_6 = api.logDebugMessage) === null || _6 === void 0 ? void 0 : _6.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1287
1312
  }
1288
1313
  // Identify if the result is a tool call
1289
1314
  // If response is a tool call, set next node for Tools
@@ -1298,7 +1323,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1298
1323
  isMcpToolCall = true;
1299
1324
  }
1300
1325
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
1301
- (_3 = api.logDebugError) === null || _3 === void 0 ? void 0 : _3.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1326
+ (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1302
1327
  }
1303
1328
  // Add last tool call to session state for loading it from Tool Answer Node
1304
1329
  api.updateSessionStateValues({
@@ -1306,20 +1331,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1306
1331
  flow: flowReferenceId,
1307
1332
  node: nodeId,
1308
1333
  } }, (isMcpToolCall && {
1309
- mcpServerUrl: (_4 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _4 === void 0 ? void 0 : _4.mcpServerUrl,
1310
- timeout: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.timeout,
1334
+ mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
1335
+ timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
1311
1336
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
1312
1337
  })), { toolCall: mainToolCall }),
1313
1338
  });
1314
1339
  // if there are any parameters/arguments, add them to the input slots
1315
1340
  if (mainToolCall.function.arguments) {
1316
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_7 = (_6 = input.aiAgent) === null || _6 === void 0 ? void 0 : _6.toolArgs) !== null && _7 !== void 0 ? _7 : {}), mainToolCall.function.arguments) });
1341
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
1317
1342
  }
1318
1343
  // Debug Message for Tool Calls, configured in the Tool Node
1319
- if ((_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.debugMessage) {
1320
- const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
1344
+ if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
1345
+ const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
1346
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
1321
1347
  // Arguments / Parameters Slots
1322
- const slots = ((_9 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _9 === void 0 ? void 0 : _9.arguments) && Object.keys(mainToolCall.function.arguments);
1348
+ const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
1323
1349
  const hasSlots = slots && slots.length > 0;
1324
1350
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1325
1351
  if (hasSlots) {
@@ -1334,7 +1360,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1334
1360
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1335
1361
  });
1336
1362
  }
1337
- (_10 = api.logDebugMessage) === null || _10 === void 0 ? void 0 : _10.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1363
+ (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1338
1364
  }
1339
1365
  if (toolChild) {
1340
1366
  api.setNextNode(toolChild.id);
@@ -1359,11 +1385,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1359
1385
  }
1360
1386
  // Optionally output the result immediately
1361
1387
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1362
- yield ((_11 = api.output) === null || _11 === void 0 ? void 0 : _11.call(api, llmResult.result, {}));
1388
+ yield ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
1363
1389
  }
1364
1390
  else if (llmResult.finishReason && llmPromptOptions.stream) {
1365
1391
  // send the finishReason as last output for a stream
1366
- (_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, "", {
1392
+ (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
1367
1393
  _cognigy: {
1368
1394
  _preventTranscript: true,
1369
1395
  _messageId,
@@ -1386,7 +1412,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1386
1412
  }
1387
1413
  // Add response to Cognigy Input/Context for further usage
1388
1414
  if (storeLocation === "context") {
1389
- (_13 = api.addToContext) === null || _13 === void 0 ? void 0 : _13.call(api, contextKey, llmResult, "simple");
1415
+ (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
1390
1416
  }
1391
1417
  else if (storeLocation === "input") {
1392
1418
  api.addToInput(inputKey, llmResult);
@@ -1399,14 +1425,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1399
1425
  const errorDetails = {
1400
1426
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1401
1427
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1402
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_14 = error.originalErrorDetails) === null || _14 === void 0 ? void 0 : _14.message),
1428
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
1403
1429
  };
1404
- (_15 = api.emitEvent) === null || _15 === void 0 ? void 0 : _15.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1430
+ (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1405
1431
  if (logErrorToSystem) {
1406
- (_16 = api.log) === null || _16 === void 0 ? void 0 : _16.call(api, "error", JSON.stringify(errorDetails));
1432
+ (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
1407
1433
  }
1408
1434
  if (errorHandling !== "stop") {
1409
- (_17 = api.logDebugError) === null || _17 === void 0 ? void 0 : _17.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1435
+ (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1410
1436
  }
1411
1437
  if (storeErrorInInput) {
1412
1438
  input.aiAgent = input.aiAgent || {};
@@ -1415,7 +1441,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1415
1441
  if (errorHandling === "continue") {
1416
1442
  // output the timeout message
1417
1443
  if (errorMessage) {
1418
- yield ((_18 = api.output) === null || _18 === void 0 ? void 0 : _18.call(api, errorMessage, null));
1444
+ yield ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
1419
1445
  }
1420
1446
  // Set default node as next node
1421
1447
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1427,7 +1453,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1427
1453
  if (!errorHandlingGotoTarget) {
1428
1454
  throw new Error("GoTo Target is required");
1429
1455
  }
1430
- if (!((_19 = api.checkThink) === null || _19 === void 0 ? void 0 : _19.call(api, nodeId))) {
1456
+ if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
1431
1457
  api.resetNextNodes();
1432
1458
  yield api.executeFlow({
1433
1459
  flowNode: {
@@ -57,8 +57,8 @@ export const AI_AGENT_JOB_MCP_TOOL = createNodeDescriptor({
57
57
  },
58
58
  {
59
59
  key: "mcpServerUrl",
60
- label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_SSE_URL__LABEL",
61
- description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_SSE_URL__DESCRIPTION",
60
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_URL__LABEL",
61
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_URL__DESCRIPTION",
62
62
  type: "cognigyText",
63
63
  params: {
64
64
  required: true,