@cognigy/rest-api-client 2025.10.0 → 2025.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/CHANGELOG.md +10 -0
  2. package/build/RestAPIClient.js +7 -0
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +58 -0
  4. package/build/apigroups/index.js +3 -1
  5. package/build/shared/charts/descriptors/index.js +2 -1
  6. package/build/shared/charts/descriptors/message/question/question.js +13 -29
  7. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +90 -64
  8. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +2 -2
  9. package/build/shared/charts/descriptors/service/aiAgent/helpers/createSystemMessage.js +22 -8
  10. package/build/shared/charts/descriptors/service/aiOpsCenterConnection.js +12 -0
  11. package/build/shared/charts/descriptors/service/handoverV2.js +0 -6
  12. package/build/shared/charts/descriptors/service/index.js +3 -1
  13. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +2 -2
  14. package/build/shared/constants.js +0 -1
  15. package/build/shared/interfaces/IOrganisation.js +32 -8
  16. package/build/shared/interfaces/messageAPI/endpoints.js +12 -2
  17. package/build/shared/interfaces/resources/IAuditEvent.js +3 -0
  18. package/build/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  19. package/build/shared/interfaces/resources/TResourceType.js +2 -0
  20. package/build/shared/interfaces/resources/TRestChannelType.js +5 -0
  21. package/build/shared/interfaces/resources/TWebhookChannelType.js +5 -0
  22. package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +1 -1
  23. package/build/shared/interfaces/restAPI/simulation/simulation/ICreateSimulationRest_2_0.js +3 -0
  24. package/build/shared/interfaces/restAPI/simulation/simulation/IDeleteSimulationRest_2_0.js +3 -0
  25. package/build/shared/interfaces/restAPI/simulation/simulation/IIndexSimulationRest_2_0.js +3 -0
  26. package/build/shared/interfaces/restAPI/simulation/simulation/IReadSimulationRest_2_0.js +3 -0
  27. package/build/shared/interfaces/restAPI/simulation/simulation/IScheduleSimulationRest_2_0.js +3 -0
  28. package/build/shared/interfaces/restAPI/simulation/simulation/ISimulationRest_2_0.js +9 -0
  29. package/build/shared/interfaces/restAPI/simulation/simulation/IUpdateSimulationRest_2_0.js +3 -0
  30. package/build/shared/interfaces/restAPI/simulation/simulationRun/IIndexSimulationRunRest_2_0.js +3 -0
  31. package/build/shared/interfaces/restAPI/simulation/simulationRun/IReadSimulationRunRest_2_0.js +3 -0
  32. package/build/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +19 -0
  33. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IGetAllSimulationRunBatchRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IIndexSimulationRunBatchRest_2_0.js +3 -0
  35. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IReadSimulationRunBatchRest_2_0.js +3 -0
  36. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/ISimulationRunBatchRest_2_0.js +9 -0
  37. package/dist/esm/RestAPIClient.js +7 -0
  38. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +44 -0
  39. package/dist/esm/apigroups/index.js +1 -0
  40. package/dist/esm/shared/charts/descriptors/index.js +3 -2
  41. package/dist/esm/shared/charts/descriptors/message/question/question.js +13 -29
  42. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +91 -65
  43. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +2 -2
  44. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createSystemMessage.js +20 -7
  45. package/dist/esm/shared/charts/descriptors/service/aiOpsCenterConnection.js +9 -0
  46. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +0 -6
  47. package/dist/esm/shared/charts/descriptors/service/index.js +1 -0
  48. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +2 -2
  49. package/dist/esm/shared/constants.js +0 -1
  50. package/dist/esm/shared/interfaces/IOrganisation.js +32 -8
  51. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +11 -1
  52. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +3 -0
  53. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  54. package/dist/esm/shared/interfaces/resources/TResourceType.js +2 -0
  55. package/dist/esm/shared/interfaces/resources/TRestChannelType.js +5 -0
  56. package/dist/esm/shared/interfaces/resources/TWebhookChannelType.js +5 -0
  57. package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +1 -1
  58. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/ICreateSimulationRest_2_0.js +2 -0
  59. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IDeleteSimulationRest_2_0.js +2 -0
  60. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IIndexSimulationRest_2_0.js +2 -0
  61. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IReadSimulationRest_2_0.js +2 -0
  62. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IScheduleSimulationRest_2_0.js +2 -0
  63. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/ISimulationRest_2_0.js +6 -0
  64. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IUpdateSimulationRest_2_0.js +2 -0
  65. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/IIndexSimulationRunRest_2_0.js +2 -0
  66. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/IReadSimulationRunRest_2_0.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +16 -0
  68. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IGetAllSimulationRunBatchRest_2_0.js +2 -0
  69. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IIndexSimulationRunBatchRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IReadSimulationRunBatchRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/ISimulationRunBatchRest_2_0.js +6 -0
  72. package/package.json +1 -1
  73. package/types/index.d.ts +325 -13
package/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+ # 2025.12.0
+ Released: June 10th, 2025
+
+ Released state of package up to date with Cognigy.AI v2025.12.0
+
+ # 2025.11.0
+ Released: May 27th, 2025
+
+ Released state of package up to date with Cognigy.AI v2025.11.0
+
  # 2025.10.0
  Released: May 13th, 2025
 
package/build/RestAPIClient.js CHANGED
@@ -18,6 +18,7 @@ const ResourcesAPIGroup_2_0_1 = require("./apigroups/ResourcesAPIGroup_2_0");
  const SessionsAPIGroup_2_0_1 = require("./apigroups/SessionsAPIGroup_2_0");
  const logger_1 = require("./shared/helper/logger");
  const AIOpsCenterAPIGroup_2_0_1 = require("./apigroups/AIOpsCenterAPIGroup_2_0");
+ const SimulationAPIGroup_2_0_1 = require("./apigroups/SimulationAPIGroup_2_0");
  exports.RestAPIClient = function (config) {
  var _a;
  if (!(this instanceof exports.RestAPIClient)) {
@@ -92,6 +93,12 @@ exports.RestAPIClient = function (config) {
  plugins.push(AIOpsCenterAPIGroup_2_0_1.AIOpsCenterAPIGroup_2_0);
  break;
  }
+ switch (config.versions.simulation) {
+ default:
+ case "2.0":
+ plugins.push(SimulationAPIGroup_2_0_1.SimulationAPIGroup_2_0);
+ break;
+ }
  const Client = PluginBase_1.Base.plugin(plugins);
  Client.config = config;
  Client.prototype.getHttpAdapter = () => {
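
Note: RestAPIClient.js registers the new SimulationAPIGroup_2_0 for any value of config.versions.simulation (the switch falls through its default into the "2.0" case), so the group is available as soon as the client is constructed. A minimal usage sketch, assuming the package root re-exports RestAPIClient as the compiled build does; the baseUrl/apiKey option names are illustrative placeholders, not taken from this diff:

    // Sketch only: option names other than "versions" are placeholders.
    const { RestAPIClient } = require("@cognigy/rest-api-client");

    const client = new RestAPIClient({
        baseUrl: "https://api-trial.cognigy.ai", // placeholder base URL
        apiKey: process.env.COGNIGY_API_KEY,     // placeholder auth option
        versions: {
            simulation: "2.0", // optional; the switch above defaults to "2.0" anyway
        },
    });
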
package/build/apigroups/SimulationAPIGroup_2_0.js ADDED
@@ -0,0 +1,58 @@
+ "use strict";
+ var __rest = (this && this.__rest) || function (s, e) {
+ var t = {};
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
+ t[p] = s[p];
+ if (s != null && typeof Object.getOwnPropertySymbols === "function")
+ for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
+ if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
+ t[p[i]] = s[p[i]];
+ }
+ return t;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.SimulationAPIGroup_2_0 = void 0;
+ const GenericAPIFn_1 = require("../GenericAPIFn");
+ const query_1 = require("../shared/helper/rest/query");
+ function SimulationAPIGroup_2_0(instance) {
+ const self = instance;
+ return {
+ indexSimulations: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
+ createSimulation: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/simulations", "POST", self)(args, options),
+ updateSimulation: (_a, options) => {
+ var { simulationReference, projectId } = _a, args = __rest(_a, ["simulationReference", "projectId"]);
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}?${(0, query_1.stringifyQuery)({
+ projectId
+ })}`, "PATCH", self)(args, options);
+ },
+ deleteSimulation: ({ simulationReference, projectId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}?${(0, query_1.stringifyQuery)({
+ projectId
+ })}`, "DELETE", self)(undefined, options),
+ readSimulation: ({ simulationReference, projectId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}?${(0, query_1.stringifyQuery)({
+ projectId
+ })}`, "GET", self)(undefined, options),
+ scheduleSimulation: (_a, options) => {
+ var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/schedule`, "POST", self)(args, options);
+ },
+ indexSimulationRunBatches: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/batches?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
+ getAllSimulationRunBatches: (_a, options) => {
+ var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
+ },
+ readSimulationRunBatch: (_a, options) => {
+ var { simulationReference, simulationRunBatchReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference"]);
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
+ },
+ indexSimulationRuns: (_a, options) => {
+ var { simulationReference, simulationRunBatchReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference"]);
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
+ },
+ readSimulationRun: (_a, options) => {
+ var { simulationReference, simulationRunBatchReference, simulationRunReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference", "simulationRunReference"]);
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs/${simulationRunReference}?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
+ }
+ };
+ }
+ exports.SimulationAPIGroup_2_0 = SimulationAPIGroup_2_0;
+ //# sourceMappingURL=SimulationAPIGroup_2_0.js.map
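
The route layout of the new group can be read straight off the template literals above: simulations live under /testing/beta/simulations, run batches under .../simulations/{simulationReference}/batches, and runs under .../batches/{simulationRunBatchReference}/runs. A hedged usage sketch, assuming the plugin mechanism mixes these methods onto the client instance like the other API groups; every value in angle brackets is a placeholder, and the real request/response shapes are in types/index.d.ts (ISimulationRest_2_0 and friends):

    // Method names and path parameters come from SimulationAPIGroup_2_0.js above;
    // whether projectId is required on each call is an assumption based on the query handling.
    const simulations = await client.indexSimulations({ projectId: "<projectId>" });

    await client.scheduleSimulation({
        simulationReference: "<simulationReference>",
        // remaining fields are sent as the POST body (shape not shown in this diff)
    });

    const run = await client.readSimulationRun({
        simulationReference: "<simulationReference>",
        simulationRunBatchReference: "<batchReference>",
        simulationRunReference: "<runReference>",
    });
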
package/build/apigroups/index.js CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.AIOpsCenterAPIGroup_2_0 = exports.AnalyticsAPIGroup_2_0 = exports.SessionsAPIGroup_2_0 = exports.ResourcesAPIGroup_2_0 = exports.ManagementAPIGroup_2_0 = exports.MetricsAPIGroup_2_0 = exports.JWTAuthAPIGroup_2_0 = exports.InsightsAPIGroup_2_0 = exports.ExternalAPIGroup_2_0 = exports.AdministrationAPIGroup_2_1 = exports.AdministrationAPIGroup_2_0 = void 0;
+ exports.SimulationAPIGroup_2_0 = exports.AIOpsCenterAPIGroup_2_0 = exports.AnalyticsAPIGroup_2_0 = exports.SessionsAPIGroup_2_0 = exports.ResourcesAPIGroup_2_0 = exports.ManagementAPIGroup_2_0 = exports.MetricsAPIGroup_2_0 = exports.JWTAuthAPIGroup_2_0 = exports.InsightsAPIGroup_2_0 = exports.ExternalAPIGroup_2_0 = exports.AdministrationAPIGroup_2_1 = exports.AdministrationAPIGroup_2_0 = void 0;
  // AdministrationAPIGroup;
  var AdministrationAPIGroup_2_0_1 = require("./AdministrationAPIGroup_2_0");
  Object.defineProperty(exports, "AdministrationAPIGroup_2_0", { enumerable: true, get: function () { return AdministrationAPIGroup_2_0_1.AdministrationAPIGroup_2_0; } });
@@ -25,4 +25,6 @@ var AnalyticsAPIGroup_2_0_1 = require("./AnalyticsAPIGroup_2_0");
  Object.defineProperty(exports, "AnalyticsAPIGroup_2_0", { enumerable: true, get: function () { return AnalyticsAPIGroup_2_0_1.AnalyticsAPIGroup_2_0; } });
  var AIOpsCenterAPIGroup_2_0_1 = require("./AIOpsCenterAPIGroup_2_0");
  Object.defineProperty(exports, "AIOpsCenterAPIGroup_2_0", { enumerable: true, get: function () { return AIOpsCenterAPIGroup_2_0_1.AIOpsCenterAPIGroup_2_0; } });
+ var SimulationAPIGroup_2_0_1 = require("./SimulationAPIGroup_2_0");
+ Object.defineProperty(exports, "SimulationAPIGroup_2_0", { enumerable: true, get: function () { return SimulationAPIGroup_2_0_1.SimulationAPIGroup_2_0; } });
  //# sourceMappingURL=index.js.map
package/build/shared/charts/descriptors/index.js CHANGED
@@ -167,7 +167,8 @@ exports.cognigyBasicModule = (0, createNodeDescriptor_1.createExtension)({
  service_1.EIGHT_BY_EIGHT_CONNECTION,
  service_1.GENESYS_CLOUD_CONNECTION,
  service_1.GENESYS_CLOUD_CONNECTION_OM,
- service_1.NICECXONEAAH_AUTHENTICATION_CONNECTION
+ service_1.NICECXONEAAH_AUTHENTICATION_CONNECTION,
+ service_1.AIOPS_CENTER_WEBHOOKS_CONNECTION,
  ],
  });
  var mongoDB_1 = require("./connectionNodes/mongoDB");
package/build/shared/charts/descriptors/message/question/question.js CHANGED
@@ -742,27 +742,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  },
  defaultValue: true,
  },
- process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && {
- key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE,
- type: "cognigyText",
- label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE__LABEL",
- description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE__DESCRIPTION",
- condition: {
- key: "escalateAnswersAction",
- value: "handover"
- }
- },
- {
- key: "escalateAnswersRepeatHandoverMessage",
- type: "toggle",
- label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__LABEL",
- description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__DESCRIPTION",
- defaultValue: false,
- condition: {
- key: "escalateAnswersAction",
- value: "handover"
- }
- },
  {
  key: "escalateAnswersHandoverCancelIntent",
  type: "cognigyText",
@@ -1718,7 +1697,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  "escalateAnswersHandoverSendResolveEvent",
  "escalateAnswersHandoverAdditionalCategoryIds",
  process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_HANDOVER_LIVE_AGENT_INBOX_ID,
- process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_AGENT_ASSIST_INIT_MESSAGE,
  process.env.FEATURE_USE_COGNIGY_LIVE_AGENT && constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ESCALATE_ANSWERS_ALLOW_AGENT_INJECT,
  "escalateAnswersHandoverChatwootInboxId",
  "escalateAnswersHandoverSendTranscriptAsFirstMessage",
@@ -1840,7 +1818,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  //#endregion DescriptorFields
  function: async ({ cognigy, nodeId, organisationId, config, inputOptions }) => {
  var _a, _b, _c;
- const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, preventTranscript, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
+ const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, preventTranscript, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
  const { input, context, profile, api } = cognigy;
  const rephraseWithAIParams = {
  generativeAI_rephraseOutputMode: config.generativeAI_rephraseOutputMode,
@@ -2258,7 +2236,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  eightByEightJSONProps: escalateIntentsHandoverEightByEightJSONProps || [],
  sendResolveEvent: escalateIntentsHandoverSendResolveEvent,
  resolveBehavior: escalateIntentsHandoverResolveBehavior,
- agentAssistInitMessage: escalateIntentsAgentAssistInitMessage,
  allowAgentInject: escalateIntentsAllowAgentInject,
  sendOnActiveEvent: escalateIntentsSendOnActiveEvent,
  sendOnQueueEvent: escalateIntentsSendOnQueueEvent,
@@ -2408,7 +2385,6 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  eightByEightJSONProps: escalateAnswersHandoverEightByEightJSONProps || [],
  sendResolveEvent: escalateAnswersHandoverSendResolveEvent,
  resolveBehavior: escalateAnswersHandoverResolveBehavior,
- agentAssistInitMessage: escalateAnswersAgentAssistInitMessage,
  allowAgentInject: escalateAnswersAllowAgentInject,
  sendOnActiveEvent: escalateAnswersSendOnActiveEvent,
  sendOnQueueEvent: escalateAnswersSendOnQueueEvent,
@@ -2433,6 +2409,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_5`, "Skipping Reprompt Message");
  sayReprompt = false;
  }
+ let nodeType = "question";
  // We will only output a reprompt if the user is not in the first execution
  // and no skip condition is true
  if ((validationMessage || repromptType) && !isFirstExecution && sayReprompt) {
@@ -2443,9 +2420,13 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  rephraseWithAIParams.question = say.text[0];
  rephraseWithAIParams.answer = input.text;
  if (sayReprompt) {
+ nodeType = "question.reprompt";
  switch (repromptType) {
  case "say":
- await say_1.SAY.function(Object.assign({ nodeType: "question.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: { preventTranscript, say: repromptSay } }, rephraseWithAIParams));
+ if ((input === null || input === void 0 ? void 0 : input.channel) === "voiceGateway2" && !validationRepeat) {
+ nodeType = "question";
+ }
+ await say_1.SAY.function(Object.assign({ nodeType, cognigy, childConfigs: [], nodeId, organisationId, config: { preventTranscript, say: repromptSay } }, rephraseWithAIParams));
  break;
  case "execute":
  // if a question reprompt is set to execute flow and we have just executed
@@ -2494,11 +2475,14 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  }
  const repromptMessage = await api.runGenerativeAIPrompt(data, "gptPromptNode");
  if (!repromptLLMStream) {
- await say_1.SAY.function({ nodeType: "question.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
+ await say_1.SAY.function({ nodeType, cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
  }
  break;
  default: // this is also "text"
- await say_1.SAY.function({ nodeType: "question.reprompt", cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
+ if ((input === null || input === void 0 ? void 0 : input.channel) === "voiceGateway2" && !validationRepeat) {
+ nodeType = "question";
+ }
+ await say_1.SAY.function({ nodeType, cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ preventTranscript, handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
  }
  }
  /* If repeat toggle is on, also output question (and maybe datepicker) again */
@@ -2523,7 +2507,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  rephraseWithAIParams.promptType = "question";
  rephraseWithAIParams.questionType = config.type;
  await say_1.SAY.function({
- nodeType: "question.initial",
+ nodeType,
  cognigy,
  childConfigs: [],
  nodeId,
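
Taken together, the question.js changes keep emitting reprompts as nodeType "question.reprompt", with one exception: on the voiceGateway2 channel with the repeat toggle (validationRepeat) off, the "say" and "text" reprompt branches fall back to plain "question", and the repeat output at the end reuses the computed nodeType instead of the hard-coded "question.initial". A condensed orientation sketch of that selection (not the literal compiled source):

    // Distilled from the diff above; applies to the "say" and "text" reprompt branches.
    let nodeType = "question";
    if (sayReprompt) {
        nodeType = "question.reprompt";
        if (input?.channel === "voiceGateway2" && !validationRepeat) {
            // VG2 without "repeat question": report the reprompt as a regular question output
            nodeType = "question";
        }
    }
    await SAY.function({ nodeType /* ...plus the say config shown above... */ });
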
package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js CHANGED
@@ -18,10 +18,10 @@ const crypto_1 = require("crypto");
  const setSessionConfig_mapper_1 = require("../../voice/mappers/setSessionConfig.mapper");
  const setSessionConfig_mapper_2 = require("../../voice/mappers/setSessionConfig.mapper");
  const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDebugMode");
- const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
  const createSystemMessage_1 = require("./helpers/createSystemMessage");
  const generateSearchPrompt_1 = require("./helpers/generateSearchPrompt");
  const getUserMemory_1 = require("./helpers/getUserMemory");
+ const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
  exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "aiAgentJob",
  defaultLabel: "AI Agent",
@@ -483,6 +483,20 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOKEN_COUNT__DESCRIPTION",
  defaultValue: false
  },
+ {
+ key: "debugLogSystemPrompt",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_SYSTEM_PROMPT__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_SYSTEM_PROMPT__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogToolDefinitions",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__DESCRIPTION",
+ defaultValue: false
+ },
  {
  key: "debugResult",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_KNOWLEDGE_RESULTS__LABEL",
@@ -815,6 +829,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "debugConfig",
  "debugResult",
  "debugLogTokenCount",
+ "debugLogSystemPrompt",
+ "debugLogToolDefinitions"
  ],
  }
  ],
@@ -835,9 +851,9 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  ],
  tags: ["ai", "aiAgent"],
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
  const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
  try {
  if (!aiAgent) {
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -885,7 +901,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
  }
  }
- const _20 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _20, cleanedProfile = __rest(_20, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+ const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
  const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
  /**
  * ----- Knowledge Search Section -----
@@ -1004,6 +1020,12 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const isOnDemandKnowledgeStoreConfigured = knowledgeSearchBehavior === "onDemand" && ((knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) || (knowledgeSearchJobKnowledge && knowledgeSearchJobStore));
  // create the system Message from the AI Agent resource and this Node's config storage
  const systemMessage = (0, createSystemMessage_1.createSystemMessage)(aiAgent, input, jobName, jobDescription, jobInstructions, userMemory, memoryContextInjection, isOnDemandKnowledgeStoreConfigured ? "onDemand" : "none");
+ // Optional Debug Message for system prompt if enabled
+ if (debugLogSystemPrompt && systemMessage.length > 0) {
+ // Replace the Cognigy brand message in the logged prompt
+ const debugSystemMessage = (_r = (_q = systemMessage[0]) === null || _q === void 0 ? void 0 : _q.content) === null || _r === void 0 ? void 0 : _r.replace(`${(0, createSystemMessage_1.getCognigyBrandMessage)()}\n`, "");
+ (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, debugSystemMessage, "UI__DEBUG_MODE__AI_AGENT_JOB__SYSTEM_PROMPT__HEADER");
+ }
  // Create Tools JSON
  /** This is the list of tools that are used in the AI Agent Job */
  const tools = [];
@@ -1072,12 +1094,12 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  message: error.message,
  }
  : error;
- (_q = api.logDebugError) === null || _q === void 0 ? void 0 : _q.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
+ (_t = api.logDebugError) === null || _t === void 0 ? void 0 : _t.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
  }
  if (mcpTools) {
  if (sendDebug) {
  if (mcpTools.length === 0) {
- (_r = api.logDebugMessage) === null || _r === void 0 ? void 0 : _r.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
+ (_u = api.logDebugMessage) === null || _u === void 0 ? void 0 : _u.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
  }
  if (mcpTools.length > 0) {
  const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
@@ -1097,7 +1119,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  });
  }
  });
- (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "MCP Tool");
+ (_v = api.logDebugMessage) === null || _v === void 0 ? void 0 : _v.call(api, messageLines.join("\n"), "MCP Tool");
  }
  }
  const filteredMcpTools = mcpTools.filter((tool) => {
@@ -1147,6 +1169,39 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  }
  ;
+ // we only add this tool if at least one knowledge source is enabled
+ if (isOnDemandKnowledgeStoreConfigured) {
+ const knowledgeTool = {
+ type: "function",
+ function: {
+ name: "retrieve_knowledge",
+ description: "Find the answer to general prompts or questions searching the attached data sources. It focuses exclusively on a knowledge search and does not execute tasks like small talk, calculations, or script running.",
+ parameters: {
+ type: "object",
+ properties: {
+ generated_prompt: {
+ type: "string",
+ description: "Generated question including the context of the conversation (I want to know...)."
+ },
+ generated_buffer_phrase: {
+ type: "string",
+ description: "A generated delay or stalling phrase. Consider the context. Adapt to your speech style and language."
+ },
+ },
+ required: ["generated_prompt", "generated_buffer_phrase"],
+ additionalProperties: false
+ }
+ }
+ };
+ if (useStrict) {
+ knowledgeTool.function.strict = true;
+ }
+ toolNames.push(knowledgeTool.function.name + " (internal)");
+ tools.push(knowledgeTool);
+ }
+ if (debugLogToolDefinitions) {
+ (_w = api.logDebugMessage) === null || _w === void 0 ? void 0 : _w.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ }
  // Optional Debug Message with the config
  if (debugConfig) {
  const messageLines = [];
@@ -1154,10 +1209,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
  // Safety settings
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_t = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _t === void 0 ? void 0 : _t.avoidHarmfulContent}`);
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidUngroundedContent}`);
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidCopyrightInfringements}`);
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.preventJailbreakAndManipulation}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_x = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _x === void 0 ? void 0 : _x.avoidHarmfulContent}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_y = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _y === void 0 ? void 0 : _y.avoidUngroundedContent}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_z = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _z === void 0 ? void 0 : _z.avoidCopyrightInfringements}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_0 = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _0 === void 0 ? void 0 : _0.preventJailbreakAndManipulation}`);
  // Tools
  if (toolNames.length > 0) {
  messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
@@ -1213,37 +1268,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
  messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
- (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
- }
- // keep this after the debug message since the "retrieve_knowledge" tool is implicit
- // we only add this tool if at least one knowledge source is enabled
- if (isOnDemandKnowledgeStoreConfigured) {
- const knowledgeTool = {
- type: "function",
- function: {
- name: "retrieve_knowledge",
- description: "Find the answer to general prompts or questions searching the attached data sources. It focuses exclusively on a knowledge search and does not execute tasks like small talk, calculations, or script running.",
- parameters: {
- type: "object",
- properties: {
- generated_prompt: {
- type: "string",
- description: "Generated question including the context of the conversation (I want to know...)."
- },
- generated_buffer_phrase: {
- type: "string",
- description: "A generated delay or stalling phrase. Consider the context. Adapt to your speech style and language."
- },
- },
- required: ["generated_prompt", "generated_buffer_phrase"],
- additionalProperties: false
- }
- }
- };
- if (useStrict) {
- knowledgeTool.function.strict = true;
- }
- tools.push(knowledgeTool);
+ (_1 = api.logDebugMessage) === null || _1 === void 0 ? void 0 : _1.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
  }
  const transcript = await api.getTranscript({
  limit: 50,
@@ -1257,14 +1282,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  transcript.length > 0 &&
  transcript[transcript.length - 1].role === transcripts_1.TranscriptRole.USER) {
  const userInput = transcript[transcript.length - 1];
- const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_y = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _y === void 0 ? void 0 : _y.text) || input.text}`;
+ const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_2 = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _2 === void 0 ? void 0 : _2.text) || input.text}`;
  transcript[transcript.length - 1].payload.text = enhancedInput;
  }
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
  const _messageId = (0, crypto_1.randomUUID)();
  const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
  // Temp fix to override the transcript if needed
- transcript: ((_z = context === null || context === void 0 ? void 0 : context._cognigy) === null || _z === void 0 ? void 0 : _z.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ transcript: ((_3 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _3 === void 0 ? void 0 : _3.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
  var _a;
  text = isStreamingChannel ? text : text.trim();
  if (text) {
@@ -1288,15 +1313,15 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  };
  }
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
- (_0 = api.setAnalyticsData) === null || _0 === void 0 ? void 0 : _0.call(api, "understood", "true");
+ (_4 = api.setAnalyticsData) === null || _4 === void 0 ? void 0 : _4.call(api, "understood", "true");
  input.understood = true;
- const fullLlmResult = await ((_1 = api.runGenerativeAIPrompt) === null || _1 === void 0 ? void 0 : _1.call(api, llmPromptOptions, "aiAgent"));
+ const fullLlmResult = await ((_5 = api.runGenerativeAIPrompt) === null || _5 === void 0 ? void 0 : _5.call(api, llmPromptOptions, "aiAgent"));
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
  const tokenUsage = fullLlmResult.tokenUsage;
  // Send optional debug message with token usage
  if (debugLogTokenCount && tokenUsage) {
- (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
+ (_6 = api.logDebugMessage) === null || _6 === void 0 ? void 0 : _6.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
  }
  // Identify if the result is a tool call
  // If response is a tool call, set next node for Tools
@@ -1311,7 +1336,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  isMcpToolCall = true;
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_3 = api.logDebugError) === null || _3 === void 0 ? void 0 : _3.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -1319,20 +1344,21 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_4 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _4 === void 0 ? void 0 : _4.mcpServerUrl,
- timeout: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.timeout,
+ mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
+ timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_7 = (_6 = input.aiAgent) === null || _6 === void 0 ? void 0 : _6.toolArgs) !== null && _7 !== void 0 ? _7 : {}), mainToolCall.function.arguments) });
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.debugMessage) {
- const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+ if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
+ const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
  // Arguments / Parameters Slots
- const slots = ((_9 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _9 === void 0 ? void 0 : _9.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -1347,7 +1373,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_10 = api.logDebugMessage) === null || _10 === void 0 ? void 0 : _10.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -1372,11 +1398,11 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Optionally output the result immediately
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
- await ((_11 = api.output) === null || _11 === void 0 ? void 0 : _11.call(api, llmResult.result, {}));
+ await ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, "", {
+ (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -1399,7 +1425,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_13 = api.addToContext) === null || _13 === void 0 ? void 0 : _13.call(api, contextKey, llmResult, "simple");
+ (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -1412,14 +1438,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const errorDetails = {
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_14 = error.originalErrorDetails) === null || _14 === void 0 ? void 0 : _14.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
  };
- (_15 = api.emitEvent) === null || _15 === void 0 ? void 0 : _15.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
+ (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
  if (logErrorToSystem) {
- (_16 = api.log) === null || _16 === void 0 ? void 0 : _16.call(api, "error", JSON.stringify(errorDetails));
+ (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
  }
  if (errorHandling !== "stop") {
- (_17 = api.logDebugError) === null || _17 === void 0 ? void 0 : _17.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
+ (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
  }
  if (storeErrorInInput) {
  input.aiAgent = input.aiAgent || {};
@@ -1428,7 +1454,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- await ((_18 = api.output) === null || _18 === void 0 ? void 0 : _18.call(api, errorMessage, null));
+ await ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
  }
  // Set default node as next node
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1440,7 +1466,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (!errorHandlingGotoTarget) {
  throw new Error("GoTo Target is required");
  }
- if (!((_19 = api.checkThink) === null || _19 === void 0 ? void 0 : _19.call(api, nodeId))) {
+ if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
  api.resetNextNodes();
  await api.executeFlow({
  flowNode: {
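
The two new toggles on the AI Agent node are plain booleans: debugLogSystemPrompt logs the composed system prompt (with the Cognigy brand message stripped via getCognigyBrandMessage), and debugLogToolDefinitions logs the final tools array, which at that point already includes the implicit retrieve_knowledge tool because that tool is now built before the debug output instead of after it. A rough sketch of what the added branches amount to (condensed from the compiled code above, not a literal excerpt):

    // Helper and key names are the real ones from aiAgentJob.js; the optional-chaining
    // form replaces the compiled (_s = api.logDebugMessage) null checks for readability.
    if (debugLogSystemPrompt && systemMessage.length > 0) {
        const prompt = systemMessage[0]?.content?.replace(`${getCognigyBrandMessage()}\n`, "");
        api.logDebugMessage?.(prompt, "UI__DEBUG_MODE__AI_AGENT_JOB__SYSTEM_PROMPT__HEADER");
    }
    if (debugLogToolDefinitions) {
        // "tools" already contains Tool Nodes, MCP tools and the implicit retrieve_knowledge entry
        api.logDebugMessage?.(tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
    }
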
package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js CHANGED
@@ -60,8 +60,8 @@ exports.AI_AGENT_JOB_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)
  },
  {
  key: "mcpServerUrl",
- label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_SSE_URL__LABEL",
- description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_SSE_URL__DESCRIPTION",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_URL__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__MCP_SERVER_URL__DESCRIPTION",
  type: "cognigyText",
  params: {
  required: true,