@cognigy/rest-api-client 4.95.0 → 4.96.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65):
  1. package/CHANGELOG.md +10 -0
  2. package/build/shared/charts/descriptors/analytics/addMemory.js +1 -1
  3. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +9 -1
  4. package/build/shared/charts/descriptors/service/GPTPrompt.js +10 -2
  5. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +158 -37
  6. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +4 -1
  7. package/build/shared/charts/descriptors/voice/nodes/bargeIn.js +1 -1
  8. package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js +1 -1
  9. package/build/shared/charts/descriptors/voice/nodes/dtmf.js +1 -1
  10. package/build/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
  11. package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -1
  12. package/build/shared/charts/descriptors/voice/nodes/noUserInput.js +1 -1
  13. package/build/shared/charts/descriptors/voice/nodes/play.js +1 -1
  14. package/build/shared/charts/descriptors/voice/nodes/sendMetadata.js +1 -1
  15. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +1 -1
  16. package/build/shared/charts/descriptors/voice/nodes/transfer.js +1 -1
  17. package/build/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +1 -1
  18. package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +1 -1
  19. package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +1 -1
  20. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -1
  21. package/build/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -1
  22. package/build/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -1
  23. package/build/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -1
  24. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +1 -1
  25. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +47 -2
  26. package/build/shared/constants.js +10 -1
  27. package/build/shared/helper/nlu/textCleaner.js +10 -2
  28. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  29. package/build/shared/interfaces/messageAPI/endpoints.js +2 -0
  30. package/build/shared/interfaces/messageAPI/handover.js +3 -3
  31. package/build/shared/interfaces/resources/IAiAgent.js +10 -0
  32. package/build/shared/interfaces/resources/intent/IDefaultReply.js +0 -1
  33. package/dist/esm/shared/charts/descriptors/analytics/addMemory.js +1 -1
  34. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +9 -1
  35. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +10 -2
  36. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +158 -37
  37. package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +4 -1
  38. package/dist/esm/shared/charts/descriptors/voice/nodes/bargeIn.js +1 -1
  39. package/dist/esm/shared/charts/descriptors/voice/nodes/continuousAsr.js +1 -1
  40. package/dist/esm/shared/charts/descriptors/voice/nodes/dtmf.js +1 -1
  41. package/dist/esm/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
  42. package/dist/esm/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -1
  43. package/dist/esm/shared/charts/descriptors/voice/nodes/noUserInput.js +1 -1
  44. package/dist/esm/shared/charts/descriptors/voice/nodes/play.js +1 -1
  45. package/dist/esm/shared/charts/descriptors/voice/nodes/sendMetadata.js +1 -1
  46. package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +1 -1
  47. package/dist/esm/shared/charts/descriptors/voice/nodes/transfer.js +1 -1
  48. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +1 -1
  49. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/hangup.js +1 -1
  50. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +1 -1
  51. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -1
  52. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -1
  53. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -1
  54. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -1
  55. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +1 -1
  56. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +47 -2
  57. package/dist/esm/shared/constants.js +9 -0
  58. package/dist/esm/shared/helper/nlu/textCleaner.js +10 -2
  59. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  60. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +2 -0
  61. package/dist/esm/shared/interfaces/messageAPI/handover.js +2 -2
  62. package/dist/esm/shared/interfaces/resources/IAiAgent.js +10 -0
  63. package/dist/esm/shared/interfaces/resources/intent/IDefaultReply.js +0 -1
  64. package/package.json +2 -2
  65. package/types/index.d.ts +80 -6
@@ -1,6 +1,10 @@
1
1
  import { __awaiter, __rest } from "tslib";
2
2
  /* Custom modules */
3
3
  import { createNodeDescriptor } from "../../../createNodeDescriptor";
4
+ import { randomUUID } from 'crypto';
5
+ import { setSessionConfig } from "../../voice/mappers/setSessionConfig.mapper";
6
+ import { voiceConfigParamsToVoiceSettings } from "../../voice/mappers/setSessionConfig.mapper";
7
+ import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebugMode";
4
8
  import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
5
9
  import { createSystemMessage, validateToolId } from "./helpers/createSystemMessage";
6
10
  import { generateSearchPrompt } from "./helpers/generateSearchPrompt";
@@ -164,7 +168,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
164
168
  },
165
169
  {
166
170
  key: "memoryContextInjection",
167
- type: "cognigyText",
171
+ type: "cognigyLLMText",
168
172
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONTEXT_INJECTION__LABEL",
169
173
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONTEXT_INJECTION__DESCRIPTION",
170
174
  defaultValue: "[[snippet-eyJ0eXBlIjoiY29udGV4dCIsImxhYmVsIjoiU2hvcnQtVGVybSBNZW1vcnkiLCJzY3JpcHQiOiJjb250ZXh0LnNob3J0VGVybU1lbW9yeSJ9]]",
@@ -610,6 +614,60 @@ export const AI_AGENT_JOB = createNodeDescriptor({
610
614
  }
611
615
  ],
612
616
  },
617
+ },
618
+ {
619
+ key: "voiceSetting",
620
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__LABEL",
621
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__DESCRIPTION",
622
+ type: "select",
623
+ defaultValue: "inheritFromAiAgent",
624
+ params: {
625
+ options: [
626
+ {
627
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__OPTIONS__INHERIT__LABEL",
628
+ value: "inheritFromAiAgent"
629
+ },
630
+ {
631
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__VOICE_SETTING__OPTIONS__USE_JOB_VOICE__LABEL",
632
+ value: "jobVoice"
633
+ }
634
+ ]
635
+ }
636
+ },
637
+ {
638
+ key: "ttsVendor",
639
+ defaultValue: "",
640
+ type: "ttsSelect",
641
+ label: "_unused_",
642
+ description: "_unused_",
643
+ params: {
644
+ languageKey: "config.ttsLanguage",
645
+ modelKey: "config.ttsModel",
646
+ voiceKey: "config.ttsVoice"
647
+ },
648
+ condition: {
649
+ key: "voiceSetting",
650
+ value: "jobVoice"
651
+ }
652
+ },
653
+ {
654
+ key: "ttsLanguage",
655
+ type: "ttsSelect",
656
+ defaultValue: "",
657
+ label: "_unused_",
658
+ },
659
+ {
660
+ key: "ttsModel",
661
+ type: "ttsSelect",
662
+ label: "_unused_",
663
+ description: "_unused_",
664
+ defaultValue: "",
665
+ },
666
+ {
667
+ key: "ttsVoice",
668
+ type: "ttsSelect",
669
+ defaultValue: "",
670
+ label: "_unused_",
613
671
  }
614
672
  ],
615
673
  sections: [
@@ -653,6 +711,15 @@ export const AI_AGENT_JOB = createNodeDescriptor({
653
711
  "streamStopTokens"
654
712
  ]
655
713
  },
714
+ {
715
+ key: "voice",
716
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__VOICE__LABEL",
717
+ defaultCollapsed: true,
718
+ fields: [
719
+ "voiceSetting",
720
+ "ttsVendor",
721
+ ],
722
+ },
656
723
  {
657
724
  key: "toolSettings",
658
725
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__TOOL_SETTINGS__LABEL",
@@ -714,6 +781,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
714
781
  { type: "section", key: "memory" },
715
782
  { type: "section", key: "knowledgeSearch" },
716
783
  { type: "section", key: "storage" },
784
+ { type: "section", key: "voice" },
717
785
  { type: "section", key: "toolSettings" },
718
786
  { type: "section", key: "imageHandling" },
719
787
  { type: "section", key: "advanced" },
@@ -722,23 +790,62 @@ export const AI_AGENT_JOB = createNodeDescriptor({
722
790
  ],
723
791
  tags: ["ai", "aiAgent"],
724
792
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
725
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9;
793
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10;
726
794
  const { api, context, input, profile, flowReferenceId } = cognigy;
727
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, } = config;
795
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
728
796
  try {
729
797
  if (!aiAgent) {
730
798
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
731
799
  }
732
- const _10 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _10, cleanedProfile = __rest(_10, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
800
+ // Determine if the job should inherit voice settings from the AI Agent
801
+ if (config.voiceSetting === "inheritFromAiAgent") {
802
+ if (aiAgent.enableVoiceConfigs && ((_a = aiAgent.voiceConfigs) === null || _a === void 0 ? void 0 : _a.ttsVendor) && aiAgent.voiceConfigs.ttsVendor !== "none") {
803
+ // Inherit voice configurations from the AI Agent if valid
804
+ Object.assign(config, {
805
+ ttsVendor: aiAgent.voiceConfigs.ttsVendor,
806
+ ttsLanguage: aiAgent.voiceConfigs.ttsLanguage,
807
+ ttsModel: aiAgent.voiceConfigs.ttsModel,
808
+ ttsVoice: aiAgent.voiceConfigs.ttsVoice,
809
+ });
810
+ }
811
+ else {
812
+ // Reset voice settings if AI Agent lacks configurations
813
+ Object.assign(config, {
814
+ ttsVendor: "none",
815
+ ttsLanguage: "",
816
+ ttsModel: "",
817
+ ttsVoice: "",
818
+ });
819
+ }
820
+ }
821
+ // After inheriting or directly setting voice configurations from the AI Agent wizard,
822
+ // we need to apply these settings to the voice gateway
823
+ // Only proceed if we have a valid TTS vendor configured
824
+ if (config.ttsVendor && config.ttsVendor !== "none") {
825
+ try {
826
+ const voiceSettings = voiceConfigParamsToVoiceSettings(config, api);
827
+ const payload = setSessionConfig.handleVGInput(voiceSettings, sessionParams || {}, api);
828
+ if (payload && Object.keys(payload).length > 0) {
829
+ api.say(null, {
830
+ _cognigy: payload,
831
+ });
832
+ }
833
+ logFullConfigToDebugMode(cognigy, voiceSettings);
834
+ }
835
+ catch (error) {
836
+ throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
837
+ }
838
+ }
839
+ const _11 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _11, cleanedProfile = __rest(_11, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
733
840
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
734
841
  /**
735
842
  * ----- Knowledge Search Section -----
736
843
  */
737
844
  let knowledgeSearchResponseData;
738
845
  const sessionState = yield api.loadSessionState();
739
- const lastToolCall = (_a = sessionState.lastToolCall) === null || _a === void 0 ? void 0 : _a.toolCall;
846
+ const lastToolCall = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.toolCall;
740
847
  if (knowledgeSearchBehavior === "always" ||
741
- (knowledgeSearchBehavior === "onDemand" && ((_b = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _b === void 0 ? void 0 : _b.name) === "retrieve_knowledge")) {
848
+ (knowledgeSearchBehavior === "onDemand" && ((_c = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _c === void 0 ? void 0 : _c.name) === "retrieve_knowledge")) {
742
849
  const knowledgeStoreIds = [];
743
850
  if (knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) {
744
851
  knowledgeStoreIds.push(aiAgent.knowledgeReferenceId);
@@ -746,8 +853,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
746
853
  if (knowledgeSearchJobKnowledge && knowledgeSearchJobStore) {
747
854
  knowledgeStoreIds.push(knowledgeSearchJobStore);
748
855
  }
749
- if (knowledgeStoreIds.length > 0 && (input.text || ((_d = (_c = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _c === void 0 ? void 0 : _c.arguments) === null || _d === void 0 ? void 0 : _d.generated_prompt))) {
750
- let query = ((_f = (_e = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _e === void 0 ? void 0 : _e.arguments) === null || _f === void 0 ? void 0 : _f.generated_prompt) || input.text;
856
+ if (knowledgeStoreIds.length > 0 && (input.text || ((_e = (_d = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _d === void 0 ? void 0 : _d.arguments) === null || _e === void 0 ? void 0 : _e.generated_prompt))) {
857
+ let query = ((_g = (_f = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _f === void 0 ? void 0 : _f.arguments) === null || _g === void 0 ? void 0 : _g.generated_prompt) || input.text;
751
858
  if (knowledgeSearchBehavior === "always" && knowledgeSearchGenerateSearchPrompt) {
752
859
  const generatedSearchPrompt = yield generateSearchPrompt({ api, userMemory, llmProviderReferenceId, debugLogTokenCount, memoryContextInjection });
753
860
  if (generatedSearchPrompt === null || generatedSearchPrompt === void 0 ? void 0 : generatedSearchPrompt.generated_prompt) {
@@ -763,10 +870,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
763
870
  knowledgeStoreIds,
764
871
  };
765
872
  if (knowledgeSearchBehavior === "onDemand") {
766
- const generated_buffer_phrase = (_h = (_g = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_buffer_phrase;
873
+ const generated_buffer_phrase = (_j = (_h = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _h === void 0 ? void 0 : _h.arguments) === null || _j === void 0 ? void 0 : _j.generated_buffer_phrase;
767
874
  if (generated_buffer_phrase) {
768
875
  // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
769
- yield ((_j = api.output) === null || _j === void 0 ? void 0 : _j.call(api, generated_buffer_phrase, {
876
+ yield ((_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, generated_buffer_phrase, {
770
877
  _cognigy: {
771
878
  _preventTranscript: true
772
879
  }
@@ -796,7 +903,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
796
903
  if (query) {
797
904
  messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
798
905
  }
799
- if ((_k = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _k === void 0 ? void 0 : _k.length) {
906
+ if ((_l = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _l === void 0 ? void 0 : _l.length) {
800
907
  knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
801
908
  var _a;
802
909
  messageLines.push(`\nTop ${index + 1}:`);
@@ -808,7 +915,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
808
915
  else {
809
916
  messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
810
917
  }
811
- (_l = api.logDebugMessage) === null || _l === void 0 ? void 0 : _l.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
918
+ (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
812
919
  }
813
920
  // Knowledge Search "onDemand" responds to a tool call
814
921
  if (knowledgeSearchBehavior === "onDemand") {
@@ -837,7 +944,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
837
944
  yield api.addTranscriptStep(toolAnswer);
838
945
  }
839
946
  }
840
- if (((_m = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _m === void 0 ? void 0 : _m.name) === "retrieve_knowledge") {
947
+ if (((_o = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _o === void 0 ? void 0 : _o.name) === "retrieve_knowledge") {
841
948
  // remove the retrieve_knowledge toolCall from session state to avoid infinite loops
842
949
  api.updateSessionStateValues({
843
950
  lastToolCall: undefined
@@ -883,10 +990,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
883
990
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
884
991
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
885
992
  // Safety settings
886
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_o = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _o === void 0 ? void 0 : _o.avoidHarmfulContent}`);
887
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_p = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _p === void 0 ? void 0 : _p.avoidUngroundedContent}`);
888
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_q = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _q === void 0 ? void 0 : _q.avoidCopyrightInfringements}`);
889
- messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_r = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _r === void 0 ? void 0 : _r.preventJailbreakAndManipulation}`);
993
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_p = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _p === void 0 ? void 0 : _p.avoidHarmfulContent}`);
994
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_q = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _q === void 0 ? void 0 : _q.avoidUngroundedContent}`);
995
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_r = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _r === void 0 ? void 0 : _r.avoidCopyrightInfringements}`);
996
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_s = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _s === void 0 ? void 0 : _s.preventJailbreakAndManipulation}`);
890
997
  // Tools
891
998
  if (tools.length > 0) {
892
999
  messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
@@ -929,7 +1036,18 @@ export const AI_AGENT_JOB = createNodeDescriptor({
929
1036
  knowledgeSearchTags.forEach(tag => messageLines.push(`- ${tag}`));
930
1037
  }
931
1038
  }
932
- (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
1039
+ // Add voice configuration debug info
1040
+ if (config.voiceSetting === "inheritFromAiAgent") {
1041
+ messageLines.push("\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__INHERIT</b>");
1042
+ }
1043
+ else {
1044
+ messageLines.push("\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__JOB_VOICE</b>");
1045
+ }
1046
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VENDOR ${config.ttsVendor || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1047
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LANGUAGE ${config.ttsLanguage || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1048
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_MODEL ${config.ttsModel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1049
+ messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
1050
+ (_t = api.logDebugMessage) === null || _t === void 0 ? void 0 : _t.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
933
1051
  }
934
1052
  // keep this after the debug message since the "retrieve_knowledge" tool is implicit
935
1053
  // we only add this tool if at least one knowledge source is enabled
@@ -973,23 +1091,26 @@ export const AI_AGENT_JOB = createNodeDescriptor({
973
1091
  transcript.length > 0 &&
974
1092
  transcript[transcript.length - 1].role === TranscriptRole.USER) {
975
1093
  const userInput = transcript[transcript.length - 1];
976
- const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_t = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _t === void 0 ? void 0 : _t.text) || input.text}`;
1094
+ const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_u = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _u === void 0 ? void 0 : _u.text) || input.text}`;
977
1095
  transcript[transcript.length - 1].payload.text = enhancedInput;
978
1096
  }
1097
+ const isStreamingChannel = input.channel === "webchat3";
1098
+ const _messageId = randomUUID();
979
1099
  const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
980
1100
  // Temp fix to override the transcript if needed
981
- transcript: ((_u = context === null || context === void 0 ? void 0 : context._cognigy) === null || _u === void 0 ? void 0 : _u.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
1101
+ transcript: ((_v = context === null || context === void 0 ? void 0 : context._cognigy) === null || _v === void 0 ? void 0 : _v.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
982
1102
  var _a;
983
- text = text.trim();
1103
+ text = isStreamingChannel ? text : text.trim();
984
1104
  if (text) {
985
1105
  // if we got text, we output it, but prevent it from being added to the transcript
986
1106
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
987
1107
  _cognigy: {
988
- _preventTranscript: true
1108
+ _preventTranscript: true,
1109
+ _messageId,
989
1110
  }
990
1111
  });
991
1112
  }
992
- }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"] }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice }));
1113
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], preventNewLineRemoval: isStreamingChannel ? true : false }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice }));
993
1114
  // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
994
1115
  if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
995
1116
  llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
@@ -1001,15 +1122,15 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1001
1122
  };
1002
1123
  }
1003
1124
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
1004
- (_v = api.setAnalyticsData) === null || _v === void 0 ? void 0 : _v.call(api, "understood", "true");
1125
+ (_w = api.setAnalyticsData) === null || _w === void 0 ? void 0 : _w.call(api, "understood", "true");
1005
1126
  input.understood = true;
1006
- const fullLlmResult = yield ((_w = api.runGenerativeAIPrompt) === null || _w === void 0 ? void 0 : _w.call(api, llmPromptOptions, "aiAgent"));
1127
+ const fullLlmResult = yield ((_x = api.runGenerativeAIPrompt) === null || _x === void 0 ? void 0 : _x.call(api, llmPromptOptions, "aiAgent"));
1007
1128
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
1008
1129
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
1009
1130
  const tokenUsage = fullLlmResult.tokenUsage;
1010
1131
  // Send optional debug message with token usage
1011
1132
  if (debugLogTokenCount && tokenUsage) {
1012
- (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1133
+ (_y = api.logDebugMessage) === null || _y === void 0 ? void 0 : _y.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1013
1134
  }
1014
1135
  // Identify if the result is a tool call
1015
1136
  // If response is a tool call, set next node for Tools
@@ -1030,13 +1151,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1030
1151
  });
1031
1152
  // if there are any parameters/arguments, add them to the input slots
1032
1153
  if (mainToolCall.function.arguments) {
1033
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_z = (_y = input.aiAgent) === null || _y === void 0 ? void 0 : _y.toolArgs) !== null && _z !== void 0 ? _z : {}), mainToolCall.function.arguments) });
1154
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_0 = (_z = input.aiAgent) === null || _z === void 0 ? void 0 : _z.toolArgs) !== null && _0 !== void 0 ? _0 : {}), mainToolCall.function.arguments) });
1034
1155
  }
1035
1156
  // Debug Message for Tool Calls, configured in the Tool Node
1036
1157
  if (toolChild === null || toolChild === void 0 ? void 0 : toolChild.config.debugMessage) {
1037
1158
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolChild.config.toolId}`];
1038
1159
  // Arguments / Parameters Slots
1039
- const slots = ((_0 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _0 === void 0 ? void 0 : _0.arguments) && Object.keys(mainToolCall.function.arguments);
1160
+ const slots = ((_1 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _1 === void 0 ? void 0 : _1.arguments) && Object.keys(mainToolCall.function.arguments);
1040
1161
  const hasSlots = slots && slots.length > 0;
1041
1162
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1042
1163
  if (hasSlots) {
@@ -1051,7 +1172,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1051
1172
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1052
1173
  });
1053
1174
  }
1054
- (_1 = api.logDebugMessage) === null || _1 === void 0 ? void 0 : _1.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1175
+ (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1055
1176
  }
1056
1177
  if (toolChild) {
1057
1178
  api.setNextNode(toolChild.id);
@@ -1076,7 +1197,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1076
1197
  }
1077
1198
  // Optionally output the result immediately
1078
1199
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1079
- yield ((_2 = api.output) === null || _2 === void 0 ? void 0 : _2.call(api, llmResult.result, {}));
1200
+ yield ((_3 = api.output) === null || _3 === void 0 ? void 0 : _3.call(api, llmResult.result, {}));
1080
1201
  }
1081
1202
  // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
1082
1203
  if (llmResult.result && llmPromptOptions.stream) {
@@ -1093,7 +1214,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1093
1214
  }
1094
1215
  // Add response to Cognigy Input/Context for further usage
1095
1216
  if (storeLocation === "context") {
1096
- (_3 = api.addToContext) === null || _3 === void 0 ? void 0 : _3.call(api, contextKey, llmResult, "simple");
1217
+ (_4 = api.addToContext) === null || _4 === void 0 ? void 0 : _4.call(api, contextKey, llmResult, "simple");
1097
1218
  }
1098
1219
  else if (storeLocation === "input") {
1099
1220
  api.addToInput(inputKey, llmResult);
@@ -1106,14 +1227,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1106
1227
  const errorDetails = {
1107
1228
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1108
1229
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1109
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_4 = error.originalErrorDetails) === null || _4 === void 0 ? void 0 : _4.message),
1230
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_5 = error.originalErrorDetails) === null || _5 === void 0 ? void 0 : _5.message),
1110
1231
  };
1111
- (_5 = api.emitEvent) === null || _5 === void 0 ? void 0 : _5.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1232
+ (_6 = api.emitEvent) === null || _6 === void 0 ? void 0 : _6.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1112
1233
  if (logErrorToSystem) {
1113
- (_6 = api.log) === null || _6 === void 0 ? void 0 : _6.call(api, "error", JSON.stringify(errorDetails));
1234
+ (_7 = api.log) === null || _7 === void 0 ? void 0 : _7.call(api, "error", JSON.stringify(errorDetails));
1114
1235
  }
1115
1236
  if (errorHandling !== "stop") {
1116
- (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1237
+ (_8 = api.logDebugError) === null || _8 === void 0 ? void 0 : _8.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1117
1238
  }
1118
1239
  if (storeErrorInInput) {
1119
1240
  input.aiAgent = input.aiAgent || {};
@@ -1122,7 +1243,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1122
1243
  if (errorHandling === "continue") {
1123
1244
  // output the timeout message
1124
1245
  if (errorMessage) {
1125
- yield ((_8 = api.output) === null || _8 === void 0 ? void 0 : _8.call(api, errorMessage, null));
1246
+ yield ((_9 = api.output) === null || _9 === void 0 ? void 0 : _9.call(api, errorMessage, null));
1126
1247
  }
1127
1248
  // Set default node as next node
1128
1249
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1134,7 +1255,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1134
1255
  if (!errorHandlingGotoTarget) {
1135
1256
  throw new Error("GoTo Target is required");
1136
1257
  }
1137
- if (!((_9 = api.checkThink) === null || _9 === void 0 ? void 0 : _9.call(api, nodeId))) {
1258
+ if (!((_10 = api.checkThink) === null || _10 === void 0 ? void 0 : _10.call(api, nodeId))) {
1138
1259
  api.resetNextNodes();
1139
1260
  yield api.executeFlow({
1140
1261
  flowNode: {
@@ -29,7 +29,7 @@ export const transfer = {
29
29
  }
30
30
  },
31
31
  handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, anchorMedia) {
32
- const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, sttLabel } = transferParam;
32
+ const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, timeLimit, sttLabel } = transferParam;
33
33
  const payload = {
34
34
  _voiceGateway2: {
35
35
  json: {}
@@ -49,6 +49,9 @@ export const transfer = {
49
49
  target: [],
50
50
  timeout: dialTimeout !== null && dialTimeout !== void 0 ? dialTimeout : 60
51
51
  };
52
+ if (timeLimit && timeLimit > 0) {
53
+ dialVerb.timeLimit = timeLimit;
54
+ }
52
55
  if (amdEnabled) {
53
56
  dialVerb.amd = {
54
57
  actionHook: "amd"
@@ -9,7 +9,7 @@ import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebu
9
9
  export const BARGE_IN = createNodeDescriptor({
10
10
  type: "bargeIn",
11
11
  defaultLabel: "Barge In - Config",
12
- tags: ["voice"],
12
+ tags: [""],
13
13
  appearance: {
14
14
  color: nodeColor,
15
15
  },
@@ -9,7 +9,7 @@ import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebu
9
9
  export const CONTINUOUS_ASR = createNodeDescriptor({
10
10
  type: "continuousASR",
11
11
  defaultLabel: "Continuous ASR - Config",
12
- tags: ["voice"],
12
+ tags: [""],
13
13
  appearance: {
14
14
  color: nodeColor,
15
15
  },
@@ -9,7 +9,7 @@ import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebu
9
9
  export const DTMF = createNodeDescriptor({
10
10
  type: "dtmf",
11
11
  defaultLabel: "DTMF Collect - Config",
12
- tags: ["voice"],
12
+ tags: [""],
13
13
  appearance: {
14
14
  color: nodeColor,
15
15
  },
@@ -7,7 +7,7 @@ import { nodeColor } from "../utils/design";
7
7
  export const HANG_UP = createNodeDescriptor({
8
8
  type: "hangup",
9
9
  defaultLabel: "Hang Up",
10
- tags: ['voice'],
10
+ tags: [''],
11
11
  preview: {
12
12
  key: "hangupReason",
13
13
  type: "text"
@@ -8,7 +8,7 @@ const mapper = new MuteSpeechInputMapper();
8
8
  export const MUTE_SPEECH_INPUT = createNodeDescriptor({
9
9
  type: "muteSpeechInput",
10
10
  defaultLabel: "Mute Speech Input",
11
- tags: ['voice'],
11
+ tags: [''],
12
12
  summary: "UI__NODE_EDITOR__MUTE_SPEECH_INPUT__SUMMARY",
13
13
  preview: {
14
14
  key: "muteSpeechInput",
@@ -9,7 +9,7 @@ import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebu
9
9
  export const USER_INPUT_TIMEOUT = createNodeDescriptor({
10
10
  type: "noUserInput",
11
11
  defaultLabel: "User Input Timeout - Config",
12
- tags: ["voice"],
12
+ tags: [""],
13
13
  appearance: {
14
14
  color: nodeColor,
15
15
  },
@@ -17,7 +17,7 @@ const fields = (voiceConfigFields.map(field => {
17
17
  export const PLAY = createNodeDescriptor({
18
18
  type: "play",
19
19
  defaultLabel: "Play URL",
20
- tags: ['voice'],
20
+ tags: [''],
21
21
  preview: {
22
22
  key: "url",
23
23
  type: "text"
@@ -8,7 +8,7 @@ export const SEND_METADATA = createNodeDescriptor({
8
8
  type: "sendMetadata",
9
9
  defaultLabel: "Send Metadata",
10
10
  summary: "UI__NODE_EDITOR__SEND_METADATA__SUMMARY",
11
- tags: ["voice"],
11
+ tags: [""],
12
12
  fields: [
13
13
  {
14
14
  key: "metadata",
@@ -291,7 +291,7 @@ export const SESSION_SPEECH_PARAMETERS = createNodeDescriptor({
291
291
  appearance: {
292
292
  color: nodeColor,
293
293
  },
294
- tags: ["voice"],
294
+ tags: [""],
295
295
  fields: voiceConfigFields,
296
296
  sections: [
297
297
  {
@@ -9,7 +9,7 @@ import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebu
9
9
  export const TRANSFER_VOICE = createNodeDescriptor({
10
10
  type: "transfer",
11
11
  defaultLabel: "Transfer",
12
- tags: ["voice"],
12
+ tags: [""],
13
13
  preview: {
14
14
  key: "referTo",
15
15
  type: "text",
@@ -14,7 +14,7 @@ export const dtmfNode = createNodeDescriptor({
14
14
  appearance: {
15
15
  color: nodeColor
16
16
  },
17
- tags: ["vg"],
17
+ tags: ["voice"],
18
18
  fields: [
19
19
  {
20
20
  key: "dtmf",
@@ -11,7 +11,7 @@ export const hangupNode = createNodeDescriptor({
11
11
  key: "hangupReason",
12
12
  type: "text"
13
13
  },
14
- tags: ["vg"],
14
+ tags: ["voice"],
15
15
  appearance: {
16
16
  color: nodeColor
17
17
  },
@@ -17,7 +17,7 @@ export const muteSpeechInputNode = createNodeDescriptor({
17
17
  appearance: {
18
18
  color: nodeColor
19
19
  },
20
- tags: ["vg"],
20
+ tags: ["voice"],
21
21
  fields: [
22
22
  {
23
23
  key: "muteSpeechInput",
@@ -16,7 +16,7 @@ export const playNode = createNodeDescriptor({
16
16
  appearance: {
17
17
  color: nodeColor
18
18
  },
19
- tags: ["vg"],
19
+ tags: ["voice"],
20
20
  fields: [
21
21
  {
22
22
  key: "url",
@@ -15,7 +15,7 @@ export const recordNode = createNodeDescriptor({
15
15
  appearance: {
16
16
  color: nodeColor
17
17
  },
18
- tags: ["vg"],
18
+ tags: ["voice"],
19
19
  constraints: {
20
20
  placement: {},
21
21
  },
@@ -15,7 +15,7 @@ export const referNode = createNodeDescriptor({
15
15
  appearance: {
16
16
  color: nodeColor
17
17
  },
18
- tags: ["vg"],
18
+ tags: ["voice"],
19
19
  constraints: {
20
20
  placement: {},
21
21
  creatable: false,
@@ -8,7 +8,7 @@ export const sendMetadataNode = createNodeDescriptor({
8
8
  type: "sendMetadata",
9
9
  defaultLabel: "Send Metadata",
10
10
  summary: "UI__NODE_EDITOR__VOICEGATEWAY2__SEND_METADATA__SUMMARY",
11
- tags: ["vg"],
11
+ tags: ["voice"],
12
12
  fields: [
13
13
  {
14
14
  key: "metadata",
@@ -1049,7 +1049,7 @@ export const setSessionConfigNode = createNodeDescriptor({
1049
1049
  fields: ["sessionParams"],
1050
1050
  },
1051
1051
  ],
1052
- tags: ["vg"],
1052
+ tags: ["voice"],
1053
1053
  form: [
1054
1054
  { type: "section", key: "params_tts" },
1055
1055
  { type: "section", key: "params_stt" },