@cognigy/rest-api-client 4.95.0 → 4.96.0

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (65)
  1. package/CHANGELOG.md +10 -0
  2. package/build/shared/charts/descriptors/analytics/addMemory.js +1 -1
  3. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +9 -1
  4. package/build/shared/charts/descriptors/service/GPTPrompt.js +10 -2
  5. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +158 -37
  6. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +4 -1
  7. package/build/shared/charts/descriptors/voice/nodes/bargeIn.js +1 -1
  8. package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js +1 -1
  9. package/build/shared/charts/descriptors/voice/nodes/dtmf.js +1 -1
  10. package/build/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
  11. package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -1
  12. package/build/shared/charts/descriptors/voice/nodes/noUserInput.js +1 -1
  13. package/build/shared/charts/descriptors/voice/nodes/play.js +1 -1
  14. package/build/shared/charts/descriptors/voice/nodes/sendMetadata.js +1 -1
  15. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +1 -1
  16. package/build/shared/charts/descriptors/voice/nodes/transfer.js +1 -1
  17. package/build/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +1 -1
  18. package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +1 -1
  19. package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +1 -1
  20. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -1
  21. package/build/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -1
  22. package/build/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -1
  23. package/build/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -1
  24. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +1 -1
  25. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +47 -2
  26. package/build/shared/constants.js +10 -1
  27. package/build/shared/helper/nlu/textCleaner.js +10 -2
  28. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  29. package/build/shared/interfaces/messageAPI/endpoints.js +2 -0
  30. package/build/shared/interfaces/messageAPI/handover.js +3 -3
  31. package/build/shared/interfaces/resources/IAiAgent.js +10 -0
  32. package/build/shared/interfaces/resources/intent/IDefaultReply.js +0 -1
  33. package/dist/esm/shared/charts/descriptors/analytics/addMemory.js +1 -1
  34. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +9 -1
  35. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +10 -2
  36. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +158 -37
  37. package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +4 -1
  38. package/dist/esm/shared/charts/descriptors/voice/nodes/bargeIn.js +1 -1
  39. package/dist/esm/shared/charts/descriptors/voice/nodes/continuousAsr.js +1 -1
  40. package/dist/esm/shared/charts/descriptors/voice/nodes/dtmf.js +1 -1
  41. package/dist/esm/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
  42. package/dist/esm/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -1
  43. package/dist/esm/shared/charts/descriptors/voice/nodes/noUserInput.js +1 -1
  44. package/dist/esm/shared/charts/descriptors/voice/nodes/play.js +1 -1
  45. package/dist/esm/shared/charts/descriptors/voice/nodes/sendMetadata.js +1 -1
  46. package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +1 -1
  47. package/dist/esm/shared/charts/descriptors/voice/nodes/transfer.js +1 -1
  48. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +1 -1
  49. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/hangup.js +1 -1
  50. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +1 -1
  51. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -1
  52. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -1
  53. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -1
  54. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -1
  55. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +1 -1
  56. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +47 -2
  57. package/dist/esm/shared/constants.js +9 -0
  58. package/dist/esm/shared/helper/nlu/textCleaner.js +10 -2
  59. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  60. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +2 -0
  61. package/dist/esm/shared/interfaces/messageAPI/handover.js +2 -2
  62. package/dist/esm/shared/interfaces/resources/IAiAgent.js +10 -0
  63. package/dist/esm/shared/interfaces/resources/intent/IDefaultReply.js +0 -1
  64. package/package.json +2 -2
  65. package/types/index.d.ts +80 -6
package/CHANGELOG.md CHANGED
@@ -1,8 +1,18 @@
+ # 4.96.0
+ Released: March 11th, 2025
+
+ Released state of package up to date with Cognigy.AI v4.96.0
+
  # 4.95.0
  Released: February 19th, 2025
  
  Released state of package up to date with Cognigy.AI v4.95.0
  
+ # 4.94.0
+ Released: February 07th, 2025
+
+ Released state of package up to date with Cognigy.AI v4.94.0
+
  # 4.93.0
  Released: Jan 22nd, 2025
  
package/build/shared/charts/descriptors/analytics/addMemory.js CHANGED
@@ -30,7 +30,7 @@ exports.ADD_MEMORY = (0, createNodeDescriptor_1.createNodeDescriptor)({
  var _a, _b;
  const { memory: memoryText } = config;
  const { api } = cognigy;
- if (memoryText) {
+ if (memoryText !== undefined && memoryText !== null && memoryText !== "") {
  let memoryToAdd;
  if (typeof memoryText === 'string') {
  memoryToAdd = memoryText;
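The change above replaces a simple truthiness check with explicit comparisons, presumably so that falsy but meaningful values (for example a CognigyScript expression that evaluates to 0 or false) are still stored as memory; only undefined, null, and the empty string now count as "nothing to add". A minimal sketch of the behavioral difference, with illustrative values that are not from the package:

const candidates = [0, false, "", null, undefined, "remember this"];
for (const memoryText of candidates) {
    const oldCheck = Boolean(memoryText); // pre-4.96.0: also drops 0 and false
    const newCheck = memoryText !== undefined && memoryText !== null && memoryText !== ""; // 4.96.0: only drops undefined, null, ""
    console.log(memoryText, { oldCheck, newCheck });
}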
package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js CHANGED
@@ -5,6 +5,7 @@ exports.SEARCH_EXTRACT_OUTPUT = void 0;
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
  const logic_1 = require("../logic");
  const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
+ const crypto_1 = require("crypto");
  const errors_1 = require("../../../errors");
  /**
  * Returns the simplified english name for a language given a language code
@@ -761,6 +762,8 @@ New: `;
  if (documents && (mode !== "s")) {
  // check if we received streamed output at all
  let streamedOutput = false;
+ const isStreamingChannel = input.channel === "webchat3";
+ const _messageId = (0, crypto_1.randomUUID)();
  const promptData = {
  prompt,
  temperature,
@@ -773,9 +776,14 @@ New: `;
  stream: outputMode === "stream" && mode === "seo",
  streamOnDataHandler: (text) => {
  streamedOutput = true;
- api.output(text, null);
+ api.output(text, {
+ _cognigy: {
+ _messageId,
+ }
+ });
  },
  streamStopTokens,
+ preventNewLineRemoval: isStreamingChannel ? true : false,
  // set the detailed results to true to get the token usage
  detailedResults: true
  };
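Both hunks above serve the same feature: each node execution now mints one randomUUID() and attaches it as _cognigy._messageId to every streamed chunk, so a client can reassemble the chunks into a single message. A sketch of how a consumer might group chunks by that id — the onOutput handler and message store are hypothetical, not part of this package's API:

const { randomUUID } = require("crypto");

const messages = new Map(); // messageId -> accumulated text

// Hypothetical client-side handler: chunks sharing a _messageId belong to one message.
function onOutput(text, data) {
    const id = data && data._cognigy && data._cognigy._messageId;
    if (!id) return;
    messages.set(id, (messages.get(id) || "") + text);
}

const _messageId = randomUUID();
onOutput("Hello,", { _cognigy: { _messageId } });
onOutput(" world!", { _cognigy: { _messageId } });
console.log(messages.get(_messageId)); // "Hello, world!"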
package/build/shared/charts/descriptors/service/GPTPrompt.js CHANGED
@@ -4,6 +4,7 @@ exports.GPT_PROMPT = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
  const logic_1 = require("../logic");
+ const crypto_1 = require("crypto");
  const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
  const errors_1 = require("../../../errors");
  exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
@@ -626,6 +627,8 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  };
  try {
+ const isStreamingChannel = input.channel === "webchat3";
+ const _messageId = (0, crypto_1.randomUUID)();
  const data = {
  prompt,
  temperature,
@@ -637,13 +640,18 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  useCase: "promptNode",
  stream: storeLocation === "stream",
  streamOnDataHandler: (text) => {
- text = text && text.trim();
+ text = isStreamingChannel ? text : text.trim();
  if (text) {
- api.output(text, null);
+ api.output(text, {
+ _cognigy: {
+ _messageId,
+ }
+ });
  }
  },
  streamStopTokens,
  streamStopTokenOverrides,
+ preventNewLineRemoval: isStreamingChannel ? true : false,
  // set to true in order to get token usage
  detailedResults: true,
  seed: Number(seed) ? Number(seed) : undefined,
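The trimming change is channel-dependent: on webchat3 the chunk is passed through untouched and preventNewLineRemoval is set, while every other channel keeps the previous trim() behavior — presumably because a streaming client that concatenates chunks needs the original whitespace and newlines to survive (an assumption; the diff does not state the motivation). A small illustration, with an invented non-streaming channel name:

function prepareChunk(text, channel) {
    const isStreamingChannel = channel === "webchat3";
    // webchat3: deliver the raw chunk so client-side concatenation preserves spacing;
    // other channels: keep the old behavior of trimming each chunk.
    return isStreamingChannel ? text : text.trim();
}

console.log(JSON.stringify(prepareChunk("First line.\n", "webchat3")));     // "First line.\n"
console.log(JSON.stringify(prepareChunk("First line.\n", "someChannel")));  // "First line."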
package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js CHANGED
@@ -14,6 +14,10 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.AI_AGENT_JOB = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
+ const crypto_1 = require("crypto");
+ const setSessionConfig_mapper_1 = require("../../voice/mappers/setSessionConfig.mapper");
+ const setSessionConfig_mapper_2 = require("../../voice/mappers/setSessionConfig.mapper");
+ const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDebugMode");
  const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
  const createSystemMessage_1 = require("./helpers/createSystemMessage");
  const generateSearchPrompt_1 = require("./helpers/generateSearchPrompt");
@@ -177,7 +181,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  },
  {
  key: "memoryContextInjection",
- type: "cognigyText",
+ type: "cognigyLLMText",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONTEXT_INJECTION__LABEL",
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONTEXT_INJECTION__DESCRIPTION",
  defaultValue: "[[snippet-eyJ0eXBlIjoiY29udGV4dCIsImxhYmVsIjoiU2hvcnQtVGVybSBNZW1vcnkiLCJzY3JpcHQiOiJjb250ZXh0LnNob3J0VGVybU1lbW9yeSJ9]]",
@@ -623,6 +627,60 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  ],
  },
+ },
+ {
+ key: "voiceSetting",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__DESCRIPTION",
+ type: "select",
+ defaultValue: "inheritFromAiAgent",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__OPTIONS__INHERIT__LABEL",
+ value: "inheritFromAiAgent"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__OPTIONS__USE_JOB_VOICE__LABEL",
+ value: "jobVoice"
+ }
+ ]
+ }
+ },
+ {
+ key: "ttsVendor",
+ defaultValue: "",
+ type: "ttsSelect",
+ label: "_unused_",
+ description: "_unused_",
+ params: {
+ languageKey: "config.ttsLanguage",
+ modelKey: "config.ttsModel",
+ voiceKey: "config.ttsVoice"
+ },
+ condition: {
+ key: "voiceSetting",
+ value: "jobVoice"
+ }
+ },
+ {
+ key: "ttsLanguage",
+ type: "ttsSelect",
+ defaultValue: "",
+ label: "_unused_",
+ },
+ {
+ key: "ttsModel",
+ type: "ttsSelect",
+ label: "_unused_",
+ description: "_unused_",
+ defaultValue: "",
+ },
+ {
+ key: "ttsVoice",
+ type: "ttsSelect",
+ defaultValue: "",
+ label: "_unused_",
  }
  ],
  sections: [
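Note that the new ttsVendor field is only rendered when voiceSetting is set to "jobVoice", expressed through the descriptor's condition property. A minimal sketch of how such a condition could be evaluated against the node's current config (illustrative logic, not the actual flow editor implementation):

function isFieldVisible(field, config) {
    if (!field.condition) return true; // unconditional fields are always shown
    return config[field.condition.key] === field.condition.value;
}

const ttsVendorField = { key: "ttsVendor", condition: { key: "voiceSetting", value: "jobVoice" } };
console.log(isFieldVisible(ttsVendorField, { voiceSetting: "jobVoice" }));           // true
console.log(isFieldVisible(ttsVendorField, { voiceSetting: "inheritFromAiAgent" })); // false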
@@ -666,6 +724,15 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "streamStopTokens"
  ]
  },
+ {
+ key: "voice",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__VOICE__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "voiceSetting",
+ "ttsVendor",
+ ],
+ },
  {
  key: "toolSettings",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__TOOL_SETTINGS__LABEL",
@@ -727,6 +794,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "section", key: "memory" },
  { type: "section", key: "knowledgeSearch" },
  { type: "section", key: "storage" },
+ { type: "section", key: "voice" },
  { type: "section", key: "toolSettings" },
  { type: "section", key: "imageHandling" },
  { type: "section", key: "advanced" },
@@ -735,23 +803,62 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  ],
  tags: ["ai", "aiAgent"],
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10;
  const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
  try {
  if (!aiAgent) {
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
  }
- const _10 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _10, cleanedProfile = __rest(_10, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+ // Determine if the job should inherit voice settings from the AI Agent
+ if (config.voiceSetting === "inheritFromAiAgent") {
+ if (aiAgent.enableVoiceConfigs && ((_a = aiAgent.voiceConfigs) === null || _a === void 0 ? void 0 : _a.ttsVendor) && aiAgent.voiceConfigs.ttsVendor !== "none") {
+ // Inherit voice configurations from the AI Agent if valid
+ Object.assign(config, {
+ ttsVendor: aiAgent.voiceConfigs.ttsVendor,
+ ttsLanguage: aiAgent.voiceConfigs.ttsLanguage,
+ ttsModel: aiAgent.voiceConfigs.ttsModel,
+ ttsVoice: aiAgent.voiceConfigs.ttsVoice,
+ });
+ }
+ else {
+ // Reset voice settings if AI Agent lacks configurations
+ Object.assign(config, {
+ ttsVendor: "none",
+ ttsLanguage: "",
+ ttsModel: "",
+ ttsVoice: "",
+ });
+ }
+ }
+ // After inheriting or directly setting voice configurations from the AI Agent wizard,
+ // we need to apply these settings to the voice gateway
+ // Only proceed if we have a valid TTS vendor configured
+ if (config.ttsVendor && config.ttsVendor !== "none") {
+ try {
+ const voiceSettings = (0, setSessionConfig_mapper_2.voiceConfigParamsToVoiceSettings)(config, api);
+ const payload = setSessionConfig_mapper_1.setSessionConfig.handleVGInput(voiceSettings, sessionParams || {}, api);
+ if (payload && Object.keys(payload).length > 0) {
+ api.say(null, {
+ _cognigy: payload,
+ });
+ }
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, voiceSettings);
+ }
+ catch (error) {
+ throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
+ }
+ }
+ const _11 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _11, cleanedProfile = __rest(_11, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
  const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
  /**
  * ----- Knowledge Search Section -----
  */
  let knowledgeSearchResponseData;
  const sessionState = await api.loadSessionState();
- const lastToolCall = (_a = sessionState.lastToolCall) === null || _a === void 0 ? void 0 : _a.toolCall;
+ const lastToolCall = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.toolCall;
  if (knowledgeSearchBehavior === "always" ||
- (knowledgeSearchBehavior === "onDemand" && ((_b = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _b === void 0 ? void 0 : _b.name) === "retrieve_knowledge")) {
+ (knowledgeSearchBehavior === "onDemand" && ((_c = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _c === void 0 ? void 0 : _c.name) === "retrieve_knowledge")) {
  const knowledgeStoreIds = [];
  if (knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) {
  knowledgeStoreIds.push(aiAgent.knowledgeReferenceId);
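Restated without the TypeScript compiler's helper variables, the inheritance rule introduced above is: with voiceSetting set to "inheritFromAiAgent", the job copies the agent's TTS settings only when voice configs are enabled and name a real vendor, and resets them otherwise; with "jobVoice" the node's own TTS fields are used unchanged. A plain-JavaScript restatement of the same logic, shown for readability:

function resolveVoiceConfig(config, aiAgent) {
    if (config.voiceSetting === "inheritFromAiAgent") {
        const vc = aiAgent.voiceConfigs;
        if (aiAgent.enableVoiceConfigs && vc && vc.ttsVendor && vc.ttsVendor !== "none") {
            // Inherit the AI Agent's voice configuration
            return { ttsVendor: vc.ttsVendor, ttsLanguage: vc.ttsLanguage, ttsModel: vc.ttsModel, ttsVoice: vc.ttsVoice };
        }
        // Agent has no usable voice configuration: reset
        return { ttsVendor: "none", ttsLanguage: "", ttsModel: "", ttsVoice: "" };
    }
    // "jobVoice": keep the TTS fields configured on the node itself
    return { ttsVendor: config.ttsVendor, ttsLanguage: config.ttsLanguage, ttsModel: config.ttsModel, ttsVoice: config.ttsVoice };
}

console.log(resolveVoiceConfig({ voiceSetting: "inheritFromAiAgent" }, { enableVoiceConfigs: false }));
// { ttsVendor: "none", ttsLanguage: "", ttsModel: "", ttsVoice: "" }

The resolved settings are then mapped to a session config payload and, whenever the vendor is not "none", sent to the voice gateway via api.say(null, { _cognigy: payload }).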
@@ -759,8 +866,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (knowledgeSearchJobKnowledge && knowledgeSearchJobStore) {
  knowledgeStoreIds.push(knowledgeSearchJobStore);
  }
- if (knowledgeStoreIds.length > 0 && (input.text || ((_d = (_c = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _c === void 0 ? void 0 : _c.arguments) === null || _d === void 0 ? void 0 : _d.generated_prompt))) {
- let query = ((_f = (_e = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _e === void 0 ? void 0 : _e.arguments) === null || _f === void 0 ? void 0 : _f.generated_prompt) || input.text;
+ if (knowledgeStoreIds.length > 0 && (input.text || ((_e = (_d = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _d === void 0 ? void 0 : _d.arguments) === null || _e === void 0 ? void 0 : _e.generated_prompt))) {
+ let query = ((_g = (_f = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _f === void 0 ? void 0 : _f.arguments) === null || _g === void 0 ? void 0 : _g.generated_prompt) || input.text;
  if (knowledgeSearchBehavior === "always" && knowledgeSearchGenerateSearchPrompt) {
  const generatedSearchPrompt = await (0, generateSearchPrompt_1.generateSearchPrompt)({ api, userMemory, llmProviderReferenceId, debugLogTokenCount, memoryContextInjection });
  if (generatedSearchPrompt === null || generatedSearchPrompt === void 0 ? void 0 : generatedSearchPrompt.generated_prompt) {
@@ -776,10 +883,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  knowledgeStoreIds,
  };
  if (knowledgeSearchBehavior === "onDemand") {
- const generated_buffer_phrase = (_h = (_g = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_buffer_phrase;
+ const generated_buffer_phrase = (_j = (_h = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _h === void 0 ? void 0 : _h.arguments) === null || _j === void 0 ? void 0 : _j.generated_buffer_phrase;
  if (generated_buffer_phrase) {
  // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
- await ((_j = api.output) === null || _j === void 0 ? void 0 : _j.call(api, generated_buffer_phrase, {
+ await ((_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, generated_buffer_phrase, {
  _cognigy: {
  _preventTranscript: true
  }
@@ -809,7 +916,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (query) {
  messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
  }
- if ((_k = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _k === void 0 ? void 0 : _k.length) {
+ if ((_l = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _l === void 0 ? void 0 : _l.length) {
  knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
  var _a;
  messageLines.push(`\nTop ${index + 1}:`);
@@ -821,7 +928,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  else {
  messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
  }
- (_l = api.logDebugMessage) === null || _l === void 0 ? void 0 : _l.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
+ (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
  }
  // Knowledge Search "onDemand" responds to a tool call
  if (knowledgeSearchBehavior === "onDemand") {
@@ -850,7 +957,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  await api.addTranscriptStep(toolAnswer);
  }
  }
- if (((_m = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _m === void 0 ? void 0 : _m.name) === "retrieve_knowledge") {
+ if (((_o = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _o === void 0 ? void 0 : _o.name) === "retrieve_knowledge") {
  // remove the retrieve_knowledge toolCall from session state to avoid infinite loops
  api.updateSessionStateValues({
  lastToolCall: undefined
@@ -896,10 +1003,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
  // Safety settings
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_o = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _o === void 0 ? void 0 : _o.avoidHarmfulContent}`);
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_p = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _p === void 0 ? void 0 : _p.avoidUngroundedContent}`);
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_q = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _q === void 0 ? void 0 : _q.avoidCopyrightInfringements}`);
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_r = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _r === void 0 ? void 0 : _r.preventJailbreakAndManipulation}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_p = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _p === void 0 ? void 0 : _p.avoidHarmfulContent}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_q = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _q === void 0 ? void 0 : _q.avoidUngroundedContent}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_r = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _r === void 0 ? void 0 : _r.avoidCopyrightInfringements}`);
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_s = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _s === void 0 ? void 0 : _s.preventJailbreakAndManipulation}`);
  // Tools
  if (tools.length > 0) {
  messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
@@ -942,7 +1049,18 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  knowledgeSearchTags.forEach(tag => messageLines.push(`- ${tag}`));
  }
  }
- (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
+ // Add voice configuration debug info
+ if (config.voiceSetting === "inheritFromAiAgent") {
+ messageLines.push("\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__INHERIT</b>");
+ }
+ else {
+ messageLines.push("\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__JOB_VOICE</b>");
+ }
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VENDOR ${config.ttsVendor || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LANGUAGE ${config.ttsLanguage || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_MODEL ${config.ttsModel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
+ (_t = api.logDebugMessage) === null || _t === void 0 ? void 0 : _t.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
  }
  // keep this after the debug message since the "retrieve_knowledge" tool is implicit
  // we only add this tool if at least one knowledge source is enabled
@@ -986,23 +1104,26 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  transcript.length > 0 &&
  transcript[transcript.length - 1].role === transcripts_1.TranscriptRole.USER) {
  const userInput = transcript[transcript.length - 1];
- const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_t = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _t === void 0 ? void 0 : _t.text) || input.text}`;
+ const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_u = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _u === void 0 ? void 0 : _u.text) || input.text}`;
  transcript[transcript.length - 1].payload.text = enhancedInput;
  }
+ const isStreamingChannel = input.channel === "webchat3";
+ const _messageId = (0, crypto_1.randomUUID)();
  const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
  // Temp fix to override the transcript if needed
- transcript: ((_u = context === null || context === void 0 ? void 0 : context._cognigy) === null || _u === void 0 ? void 0 : _u.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ transcript: ((_v = context === null || context === void 0 ? void 0 : context._cognigy) === null || _v === void 0 ? void 0 : _v.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
  var _a;
- text = text.trim();
+ text = isStreamingChannel ? text : text.trim();
  if (text) {
  // if we got text, we output it, but prevent it from being added to the transcript
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
  _cognigy: {
- _preventTranscript: true
+ _preventTranscript: true,
+ _messageId,
  }
  });
  }
- }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"] }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice }));
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], preventNewLineRemoval: isStreamingChannel ? true : false }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice }));
  // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
  if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
  llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
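As in the other nodes, the handler now passes chunks through untrimmed on webchat3 and tags them with a shared _messageId; the default streamStopTokens of [".", "!", "?", "\\n"] mean chunks are flushed at sentence boundaries unless the node overrides them. A hypothetical illustration of stop-token chunking — the real splitting happens inside runGenerativeAIPrompt, which is not part of this diff:

function chunkByStopTokens(text, stopTokens) {
    const chunks = [];
    let current = "";
    for (const ch of text) {
        current += ch;
        if (stopTokens.includes(ch)) { // sentence boundary reached: flush
            chunks.push(current);
            current = "";
        }
    }
    if (current) chunks.push(current); // trailing partial chunk
    return chunks;
}

console.log(chunkByStopTokens("Hi there. How can I help?", [".", "!", "?", "\n"]));
// [ "Hi there.", " How can I help?" ]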
@@ -1014,15 +1135,15 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  };
  }
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
- (_v = api.setAnalyticsData) === null || _v === void 0 ? void 0 : _v.call(api, "understood", "true");
+ (_w = api.setAnalyticsData) === null || _w === void 0 ? void 0 : _w.call(api, "understood", "true");
  input.understood = true;
- const fullLlmResult = await ((_w = api.runGenerativeAIPrompt) === null || _w === void 0 ? void 0 : _w.call(api, llmPromptOptions, "aiAgent"));
+ const fullLlmResult = await ((_x = api.runGenerativeAIPrompt) === null || _x === void 0 ? void 0 : _x.call(api, llmPromptOptions, "aiAgent"));
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
  const tokenUsage = fullLlmResult.tokenUsage;
  // Send optional debug message with token usage
  if (debugLogTokenCount && tokenUsage) {
- (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
+ (_y = api.logDebugMessage) === null || _y === void 0 ? void 0 : _y.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
  }
  // Identify if the result is a tool call
  // If response is a tool call, set next node for Tools
@@ -1043,13 +1164,13 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_z = (_y = input.aiAgent) === null || _y === void 0 ? void 0 : _y.toolArgs) !== null && _z !== void 0 ? _z : {}), mainToolCall.function.arguments) });
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_0 = (_z = input.aiAgent) === null || _z === void 0 ? void 0 : _z.toolArgs) !== null && _0 !== void 0 ? _0 : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
  if (toolChild === null || toolChild === void 0 ? void 0 : toolChild.config.debugMessage) {
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolChild.config.toolId}`];
  // Arguments / Parameters Slots
- const slots = ((_0 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _0 === void 0 ? void 0 : _0.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_1 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _1 === void 0 ? void 0 : _1.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -1064,7 +1185,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_1 = api.logDebugMessage) === null || _1 === void 0 ? void 0 : _1.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -1089,7 +1210,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Optionally output the result immediately
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
- await ((_2 = api.output) === null || _2 === void 0 ? void 0 : _2.call(api, llmResult.result, {}));
+ await ((_3 = api.output) === null || _3 === void 0 ? void 0 : _3.call(api, llmResult.result, {}));
  }
  // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
  if (llmResult.result && llmPromptOptions.stream) {
@@ -1106,7 +1227,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_3 = api.addToContext) === null || _3 === void 0 ? void 0 : _3.call(api, contextKey, llmResult, "simple");
+ (_4 = api.addToContext) === null || _4 === void 0 ? void 0 : _4.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -1119,14 +1240,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const errorDetails = {
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_4 = error.originalErrorDetails) === null || _4 === void 0 ? void 0 : _4.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_5 = error.originalErrorDetails) === null || _5 === void 0 ? void 0 : _5.message),
  };
- (_5 = api.emitEvent) === null || _5 === void 0 ? void 0 : _5.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
+ (_6 = api.emitEvent) === null || _6 === void 0 ? void 0 : _6.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
  if (logErrorToSystem) {
- (_6 = api.log) === null || _6 === void 0 ? void 0 : _6.call(api, "error", JSON.stringify(errorDetails));
+ (_7 = api.log) === null || _7 === void 0 ? void 0 : _7.call(api, "error", JSON.stringify(errorDetails));
  }
  if (errorHandling !== "stop") {
- (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
+ (_8 = api.logDebugError) === null || _8 === void 0 ? void 0 : _8.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
  }
  if (storeErrorInInput) {
  input.aiAgent = input.aiAgent || {};
@@ -1135,7 +1256,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- await ((_8 = api.output) === null || _8 === void 0 ? void 0 : _8.call(api, errorMessage, null));
+ await ((_9 = api.output) === null || _9 === void 0 ? void 0 : _9.call(api, errorMessage, null));
  }
  // Set default node as next node
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1147,7 +1268,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (!errorHandlingGotoTarget) {
  throw new Error("GoTo Target is required");
  }
- if (!((_9 = api.checkThink) === null || _9 === void 0 ? void 0 : _9.call(api, nodeId))) {
+ if (!((_10 = api.checkThink) === null || _10 === void 0 ? void 0 : _10.call(api, nodeId))) {
  api.resetNextNodes();
  await api.executeFlow({
  flowNode: {
package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js CHANGED
@@ -32,7 +32,7 @@ exports.transfer = {
  }
  },
  handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, anchorMedia) {
- const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, sttLabel } = transferParam;
+ const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, timeLimit, sttLabel } = transferParam;
  const payload = {
  _voiceGateway2: {
  json: {}
@@ -52,6 +52,9 @@ exports.transfer = {
  target: [],
  timeout: dialTimeout !== null && dialTimeout !== void 0 ? dialTimeout : 60
  };
+ if (timeLimit && timeLimit > 0) {
+ dialVerb.timeLimit = timeLimit;
+ }
  if (amdEnabled) {
  dialVerb.amd = {
  actionHook: "amd"
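The transfer mapper now forwards an optional timeLimit into the dial verb alongside the existing timeout (how long to wait for an answer); timeLimit presumably caps the duration of the connected call, as in jambonz-style dial verbs (an assumption, since the diff only shows the assignment). A sketch of the resulting shape, with illustrative values:

function buildDialVerb({ dialTimeout, timeLimit }) {
    const dialVerb = {
        target: [],
        timeout: dialTimeout !== null && dialTimeout !== void 0 ? dialTimeout : 60,
    };
    if (timeLimit && timeLimit > 0) {
        dialVerb.timeLimit = timeLimit; // only set when a positive limit is configured
    }
    return dialVerb;
}

console.log(buildDialVerb({ dialTimeout: 30, timeLimit: 600 }));
// { target: [], timeout: 30, timeLimit: 600 }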
package/build/shared/charts/descriptors/voice/nodes/bargeIn.js CHANGED
@@ -11,7 +11,7 @@ const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDe
  exports.BARGE_IN = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "bargeIn",
  defaultLabel: "Barge In - Config",
- tags: ["voice"],
+ tags: [""],
  appearance: {
  color: design_1.nodeColor,
  },
package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js CHANGED
@@ -11,7 +11,7 @@ const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDe
  exports.CONTINUOUS_ASR = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "continuousASR",
  defaultLabel: "Continuous ASR - Config",
- tags: ["voice"],
+ tags: [""],
  appearance: {
  color: design_1.nodeColor,
  },
package/build/shared/charts/descriptors/voice/nodes/dtmf.js CHANGED
@@ -11,7 +11,7 @@ const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDe
  exports.DTMF = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "dtmf",
  defaultLabel: "DTMF Collect - Config",
- tags: ["voice"],
+ tags: [""],
  appearance: {
  color: design_1.nodeColor,
  },
package/build/shared/charts/descriptors/voice/nodes/hangup.js CHANGED
@@ -9,7 +9,7 @@ const design_1 = require("../utils/design");
  exports.HANG_UP = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "hangup",
  defaultLabel: "Hang Up",
- tags: ['voice'],
+ tags: [''],
  preview: {
  key: "hangupReason",
  type: "text"
package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js CHANGED
@@ -10,7 +10,7 @@ const mapper = new muteSpeechInput_mapper_1.MuteSpeechInputMapper();
  exports.MUTE_SPEECH_INPUT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "muteSpeechInput",
  defaultLabel: "Mute Speech Input",
- tags: ['voice'],
+ tags: [''],
  summary: "UI__NODE_EDITOR__MUTE_SPEECH_INPUT__SUMMARY",
  preview: {
  key: "muteSpeechInput",
package/build/shared/charts/descriptors/voice/nodes/noUserInput.js CHANGED
@@ -11,7 +11,7 @@ const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDe
  exports.USER_INPUT_TIMEOUT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "noUserInput",
  defaultLabel: "User Input Timeout - Config",
- tags: ["voice"],
+ tags: [""],
  appearance: {
  color: design_1.nodeColor,
  },
package/build/shared/charts/descriptors/voice/nodes/play.js CHANGED
@@ -19,7 +19,7 @@ const fields = (setSessionConfig_1.voiceConfigFields.map(field => {
  exports.PLAY = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "play",
  defaultLabel: "Play URL",
- tags: ['voice'],
+ tags: [''],
  preview: {
  key: "url",
  type: "text"
package/build/shared/charts/descriptors/voice/nodes/sendMetadata.js CHANGED
@@ -10,7 +10,7 @@ exports.SEND_METADATA = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "sendMetadata",
  defaultLabel: "Send Metadata",
  summary: "UI__NODE_EDITOR__SEND_METADATA__SUMMARY",
- tags: ["voice"],
+ tags: [""],
  fields: [
  {
  key: "metadata",
package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js CHANGED
@@ -293,7 +293,7 @@ exports.SESSION_SPEECH_PARAMETERS = (0, createNodeDescriptor_1.createNodeDescrip
  appearance: {
  color: design_1.nodeColor,
  },
- tags: ["voice"],
+ tags: [""],
  fields: exports.voiceConfigFields,
  sections: [
  {
package/build/shared/charts/descriptors/voice/nodes/transfer.js CHANGED
@@ -11,7 +11,7 @@ const logFullConfigToDebugMode_1 = require("../../../../helper/logFullConfigToDe
  exports.TRANSFER_VOICE = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "transfer",
  defaultLabel: "Transfer",
- tags: ["voice"],
+ tags: [""],
  preview: {
  key: "referTo",
  type: "text",
package/build/shared/charts/descriptors/voicegateway2/nodes/dtmf.js CHANGED
@@ -16,7 +16,7 @@ exports.dtmfNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  appearance: {
  color: design_1.nodeColor
  },
- tags: ["vg"],
+ tags: ["voice"],
  fields: [
  {
  key: "dtmf",