@cognigy/rest-api-client 2025.25.0 → 2026.2.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. package/CHANGELOG.md +10 -0
  2. package/README.md +15 -0
  3. package/build/apigroups/InsightsAPIGroup_2_1.js +27 -0
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +140 -375
  5. package/build/apigroups/SimulationAPIGroup_2_0.js +33 -18
  6. package/build/apigroups/aiAgentsV2/agent.js +3 -0
  7. package/build/apigroups/aiAgentsV2/agentAPI.js +38 -0
  8. package/build/apigroups/aiAgentsV2/agentPersona.js +3 -0
  9. package/build/apigroups/aiAgentsV2/agentPersonaAPI.js +38 -0
  10. package/build/apigroups/aiAgentsV2/tool.js +3 -0
  11. package/build/apigroups/aiAgentsV2/toolAPI.js +35 -0
  12. package/build/apigroups/aiAgentsV2/toolDescriptor.js +3 -0
  13. package/build/apigroups/aiAgentsV2/toolDescriptorAPI.js +13 -0
  14. package/build/apigroups/index.js +3 -1
  15. package/build/authentication/AuthenticationAPI.js +1 -0
  16. package/build/shared/charts/createNodeDescriptor.js +1 -0
  17. package/build/shared/charts/descriptors/analytics/overwriteAnalytics.js +14 -0
  18. package/build/shared/charts/descriptors/analytics/updateProfile.js +5 -0
  19. package/build/shared/charts/descriptors/connectionNodes/smtp/emailNotification.js +7 -0
  20. package/build/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  21. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +15 -0
  22. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +13 -0
  23. package/build/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +63 -10
  24. package/build/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +52 -0
  25. package/build/shared/charts/descriptors/connectionNodes/speechProviders/index.js +8 -7
  26. package/build/shared/charts/descriptors/index.js +6 -0
  27. package/build/shared/charts/descriptors/message/question/question.js +254 -59
  28. package/build/shared/charts/descriptors/message/say.js +3 -0
  29. package/build/shared/charts/descriptors/service/agentTools/executeWorkflowTool.js +239 -0
  30. package/build/shared/charts/descriptors/service/agentTools/handoverToHumanAgentTool.js +783 -0
  31. package/build/shared/charts/descriptors/service/agentTools/sendEmailTool.js +33 -4
  32. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +25 -20
  33. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +10 -6
  34. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +57 -1
  35. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +10 -1
  36. package/build/shared/charts/descriptors/service/aiAgent/helpers/parseMcpHeaders.js +26 -0
  37. package/build/shared/charts/descriptors/service/aiAgentV2.js +89 -0
  38. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  39. package/build/shared/charts/descriptors/service/httpRequest.js +3 -0
  40. package/build/shared/charts/descriptors/service/index.js +9 -1
  41. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +26 -17
  42. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +57 -1
  43. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +75 -15
  44. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +27 -5
  45. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +67 -2
  46. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  47. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +149 -4
  48. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +137 -4
  49. package/build/shared/errors/ErrorCode.js +2 -1
  50. package/build/shared/errors/ErrorCollection.js +1 -0
  51. package/build/shared/helper/BaseContext.js +1 -1
  52. package/build/shared/interfaces/amqpInterface.js +1 -0
  53. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  54. package/build/shared/interfaces/handover.js +1 -0
  55. package/build/shared/interfaces/handoverProviders.js +0 -1
  56. package/build/shared/interfaces/messageAPI/endpoints.js +4 -1
  57. package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
  58. package/build/shared/interfaces/resources/IChart.js +10 -1
  59. package/build/shared/interfaces/resources/IChartNode.js +32 -4
  60. package/build/shared/interfaces/resources/IEndpoint.js +1 -0
  61. package/build/shared/interfaces/resources/INodeDescriptorSet.js +8 -0
  62. package/build/shared/interfaces/resources/TResourceType.js +1 -0
  63. package/build/shared/interfaces/resources/chart/IChartExecutableNode.js +10 -1
  64. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeConnector.js +49 -0
  65. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  66. package/build/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  67. package/build/shared/interfaces/restAPI/administration/user/v2.0/IExchangeCXoneTokenRest_2_0.js +3 -0
  68. package/build/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +3 -0
  69. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/ICreateKnowledgeConnectorRest_2_0.js +3 -0
  70. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IDeleteKnowledgeConnectorRest_2_0.js +3 -0
  71. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IIndexKnowledgeConnectorsRest_2_0.js +3 -0
  72. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IKnowledgeConnector_2_0.js +3 -0
  73. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IReadKnowledgeConnectorRest_2_0.js +3 -0
  74. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +3 -0
  75. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IUpdateKnowledgeConnectorRest_2_0.js +3 -0
  76. package/build/shared/interfaces/restAPI/simulation/scheduler/ICreateSchedulerRest_2_0.js +3 -0
  77. package/build/shared/interfaces/restAPI/simulation/scheduler/IGetSchedulerRest_2_0.js +3 -0
  78. package/build/shared/interfaces/restAPI/simulation/scheduler/ISchedulerRest_2_0.js +12 -0
  79. package/build/shared/interfaces/restAPI/simulation/scheduler/IUpdateSchedulerRest_2_0.js +3 -0
  80. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +3 -0
  81. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +3 -0
  82. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +3 -0
  83. package/build/shared/interfaces/security/ISessionScope.js +3 -0
  84. package/build/spec/aiAgentV2.spec.js +564 -0
  85. package/dist/esm/apigroups/InsightsAPIGroup_2_1.js +13 -0
  86. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +140 -375
  87. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +33 -18
  88. package/dist/esm/apigroups/aiAgentsV2/agent.js +2 -0
  89. package/dist/esm/apigroups/aiAgentsV2/agentAPI.js +24 -0
  90. package/dist/esm/apigroups/aiAgentsV2/agentPersona.js +2 -0
  91. package/dist/esm/apigroups/aiAgentsV2/agentPersonaAPI.js +24 -0
  92. package/dist/esm/apigroups/aiAgentsV2/aiAgentV2API.js +2 -0
  93. package/dist/esm/apigroups/aiAgentsV2/tool.js +2 -0
  94. package/dist/esm/apigroups/aiAgentsV2/toolAPI.js +21 -0
  95. package/dist/esm/apigroups/aiAgentsV2/toolDescriptor.js +2 -0
  96. package/dist/esm/apigroups/aiAgentsV2/toolDescriptorAPI.js +9 -0
  97. package/dist/esm/apigroups/index.js +1 -0
  98. package/dist/esm/authentication/AuthenticationAPI.js +1 -0
  99. package/dist/esm/shared/charts/createNodeDescriptor.js +1 -0
  100. package/dist/esm/shared/charts/descriptors/analytics/overwriteAnalytics.js +14 -0
  101. package/dist/esm/shared/charts/descriptors/analytics/updateProfile.js +5 -0
  102. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/emailNotification.js +7 -0
  103. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  104. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +12 -0
  105. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +10 -0
  106. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +63 -10
  107. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +49 -0
  108. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/index.js +3 -3
  109. package/dist/esm/shared/charts/descriptors/index.js +7 -1
  110. package/dist/esm/shared/charts/descriptors/message/question/question.js +254 -59
  111. package/dist/esm/shared/charts/descriptors/message/say.js +3 -0
  112. package/dist/esm/shared/charts/descriptors/service/agentTools/executeWorkflowTool.js +237 -0
  113. package/dist/esm/shared/charts/descriptors/service/agentTools/handoverToHumanAgentTool.js +770 -0
  114. package/dist/esm/shared/charts/descriptors/service/agentTools/sendEmailTool.js +33 -4
  115. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +25 -20
  116. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +10 -6
  117. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +56 -0
  118. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +10 -1
  119. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/parseMcpHeaders.js +25 -0
  120. package/dist/esm/shared/charts/descriptors/service/aiAgentV2.js +87 -0
  121. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  122. package/dist/esm/shared/charts/descriptors/service/httpRequest.js +3 -0
  123. package/dist/esm/shared/charts/descriptors/service/index.js +4 -0
  124. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +33 -24
  125. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +56 -0
  126. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +75 -15
  127. package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +27 -5
  128. package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +67 -2
  129. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  130. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +149 -4
  131. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +137 -4
  132. package/dist/esm/shared/errors/ErrorCode.js +2 -1
  133. package/dist/esm/shared/errors/ErrorCollection.js +1 -0
  134. package/dist/esm/shared/helper/BaseContext.js +1 -1
  135. package/dist/esm/shared/interfaces/amqpInterface.js +1 -0
  136. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  137. package/dist/esm/shared/interfaces/handover.js +1 -0
  138. package/dist/esm/shared/interfaces/handoverProviders.js +0 -1
  139. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +4 -1
  140. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +1 -0
  141. package/dist/esm/shared/interfaces/resources/IChart.js +10 -1
  142. package/dist/esm/shared/interfaces/resources/IChartNode.js +32 -4
  143. package/dist/esm/shared/interfaces/resources/IEndpoint.js +1 -0
  144. package/dist/esm/shared/interfaces/resources/INodeDescriptorSet.js +8 -0
  145. package/dist/esm/shared/interfaces/resources/TResourceType.js +1 -0
  146. package/dist/esm/shared/interfaces/resources/chart/IChartExecutableNode.js +10 -1
  147. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeConnector.js +46 -0
  148. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  149. package/dist/esm/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  150. package/dist/esm/shared/interfaces/restAPI/administration/user/v2.0/IExchangeCXoneTokenRest_2_0.js +2 -0
  151. package/dist/esm/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +2 -0
  152. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/ICreateKnowledgeConnectorRest_2_0.js +2 -0
  153. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IDeleteKnowledgeConnectorRest_2_0.js +2 -0
  154. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IIndexKnowledgeConnectorsRest_2_0.js +2 -0
  155. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IKnowledgeConnector_2_0.js +2 -0
  156. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IReadKnowledgeConnectorRest_2_0.js +2 -0
  157. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +2 -0
  158. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IUpdateKnowledgeConnectorRest_2_0.js +2 -0
  159. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/ICreateSchedulerRest_2_0.js +2 -0
  160. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/IGetSchedulerRest_2_0.js +2 -0
  161. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/ISchedulerRest_2_0.js +9 -0
  162. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/IUpdateSchedulerRest_2_0.js +2 -0
  163. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +2 -0
  164. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +2 -0
  165. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +2 -0
  166. package/dist/esm/shared/interfaces/security/ISessionScope.js +2 -0
  167. package/dist/esm/spec/aiAgentV2.spec.js +563 -0
  168. package/package.json +6 -3
  169. package/types/index.d.ts +1198 -44
@@ -5,6 +5,7 @@ import * as crypto from "crypto";
5
5
  import { createNodeDescriptor } from "../../../createNodeDescriptor";
6
6
  import { GO_TO } from "../../logic";
7
7
  import { createToolDefinitions } from "../aiAgent/helpers/createToolDefinitions";
8
+ import { parseMcpHeaders } from "../aiAgent/helpers/parseMcpHeaders";
8
9
  import { createLastConverationString, createLastUserInputString, writeLLMDebugLogs } from "../../nlu/generativeSlotFiller/prompt";
9
10
  import { InternalServerError } from "../../../../errors";
10
11
  import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
@@ -16,7 +17,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
16
17
  collapsable: true,
17
18
  placement: {
18
19
  children: {
19
- whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool", "knowledgeTool", "handoverToAiAgentTool", "sendEmailTool"],
20
+ whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool", "knowledgeTool", "handoverToAiAgentTool", "handoverToHumanAgentTool", "sendEmailTool", "executeWorkflowTool"],
20
21
  },
21
22
  },
22
23
  },
@@ -689,8 +690,11 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
689
690
  color: "#252525",
690
691
  },
691
692
  tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
693
+ mocking: {
694
+ defaultMockCode: `input.llmResponse = {response: "Mock response"};`
695
+ },
692
696
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
693
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
697
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0;
694
698
  const { api, input, flowReferenceId } = cognigy;
695
699
  const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
696
700
  errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, loggingHeaders, conditionForLogging, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
@@ -718,17 +722,17 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
718
722
  }
719
723
  // handle errors from external services, depending on the settings
720
724
  const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
721
- var _z, _0, _1, _2, _3, _4;
725
+ var _1, _2, _3, _4, _5, _6;
722
726
  const compactError = {
723
727
  name: error === null || error === void 0 ? void 0 : error.name,
724
728
  code: error === null || error === void 0 ? void 0 : error.code,
725
729
  message: (error === null || error === void 0 ? void 0 : error.message) || error
726
730
  };
727
731
  // return the requestId if it exist in the error obj.
728
- if ((_z = error === null || error === void 0 ? void 0 : error.meta) === null || _z === void 0 ? void 0 : _z.requestId) {
729
- compactError["requestId"] = (_0 = error === null || error === void 0 ? void 0 : error.meta) === null || _0 === void 0 ? void 0 : _0.requestId;
732
+ if ((_1 = error === null || error === void 0 ? void 0 : error.meta) === null || _1 === void 0 ? void 0 : _1.requestId) {
733
+ compactError["requestId"] = (_2 = error === null || error === void 0 ? void 0 : error.meta) === null || _2 === void 0 ? void 0 : _2.requestId;
730
734
  }
731
- if ((_1 = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _1 === void 0 ? void 0 : _1.code) {
735
+ if ((_3 = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _3 === void 0 ? void 0 : _3.code) {
732
736
  compactError.code = error.originalErrorDetails.code;
733
737
  }
734
738
  const errorResponse = {
@@ -737,7 +741,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
737
741
  // add error to context or input
738
742
  switch (storeLocation) {
739
743
  case "context":
740
- (_2 = api.addToContext) === null || _2 === void 0 ? void 0 : _2.call(api, contextKey, errorResponse, "simple");
744
+ (_4 = api.addToContext) === null || _4 === void 0 ? void 0 : _4.call(api, contextKey, errorResponse, "simple");
741
745
  break;
742
746
  default:
743
747
  api.addToInput(inputKey, errorResponse);
@@ -745,7 +749,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
745
749
  if (errorHandling === "continue") {
746
750
  // output the timeout message
747
751
  if (errorMessage) {
748
- yield ((_3 = api.output) === null || _3 === void 0 ? void 0 : _3.call(api, errorMessage, null));
752
+ yield ((_5 = api.output) === null || _5 === void 0 ? void 0 : _5.call(api, errorMessage, null));
749
753
  }
750
754
  // Continue with default node as next node
751
755
  const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
@@ -772,7 +776,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
772
776
  absorbContext: false
773
777
  }
774
778
  };
775
- yield ((_4 = GO_TO.function) === null || _4 === void 0 ? void 0 : _4.call(GO_TO, gotoParams));
779
+ yield ((_6 = GO_TO.function) === null || _6 === void 0 ? void 0 : _6.call(GO_TO, gotoParams));
776
780
  }
777
781
  else {
778
782
  throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
@@ -894,13 +898,16 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
894
898
  break;
895
899
  }
896
900
  }
901
+ let mcpHeaders = (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.mcpHeaders;
897
902
  if (!toolChild && toolMap.has(mainToolCall.function.name)) {
898
903
  // If the tool call is from an MCP tool, set the next node to the corresponding child node
899
904
  toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
900
905
  isMcpToolCall = true;
906
+ // Parse mcpHeaders values if present and resolve any Cognigy script expressions
907
+ mcpHeaders = yield parseMcpHeaders(toolChild === null || toolChild === void 0 ? void 0 : toolChild.config.mcpHeaders, api);
901
908
  }
902
909
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
903
- (_g = api.logDebugError) === null || _g === void 0 ? void 0 : _g.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
910
+ (_h = api.logDebugError) === null || _h === void 0 ? void 0 : _h.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
904
911
  }
905
912
  // Add last tool call to session state for loading it from Tool Answer Node
906
913
  api.updateSessionStateValues({
@@ -908,21 +915,23 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
908
915
  flow: flowReferenceId,
909
916
  node: nodeId,
910
917
  } }, (isMcpToolCall && {
911
- mcpServerUrl: (_h = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _h === void 0 ? void 0 : _h.mcpServerUrl,
912
- mcpHeaders: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.mcpHeaders,
918
+ mcpServerUrl: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.mcpServerUrl,
919
+ mcpHeaders,
913
920
  timeout: (_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.timeout,
914
921
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
922
+ authType: (_l = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _l === void 0 ? void 0 : _l.authType,
923
+ oAuth2Connection: (_m = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _m === void 0 ? void 0 : _m.oAuth2Connection,
915
924
  })), { toolCall: mainToolCall }),
916
925
  });
917
926
  // if there are any parameters/arguments, add them to the input slots
918
927
  if (mainToolCall.function.arguments) {
919
- input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_m = (_l = input.llmPrompt) === null || _l === void 0 ? void 0 : _l.toolArgs) !== null && _m !== void 0 ? _m : {}), mainToolCall.function.arguments) });
928
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_p = (_o = input.llmPrompt) === null || _o === void 0 ? void 0 : _o.toolArgs) !== null && _p !== void 0 ? _p : {}), mainToolCall.function.arguments) });
920
929
  }
921
930
  // Debug Message for Tool Calls, configured in the Tool Node
922
- if ((_o = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _o === void 0 ? void 0 : _o.debugMessage) {
931
+ if ((_q = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _q === void 0 ? void 0 : _q.debugMessage) {
923
932
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${yield api.parseCognigyScriptText(toolChild.config.toolId)}`];
924
933
  // Arguments / Parameters Slots
925
- const slots = ((_p = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _p === void 0 ? void 0 : _p.arguments) && Object.keys(mainToolCall.function.arguments);
934
+ const slots = ((_r = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _r === void 0 ? void 0 : _r.arguments) && Object.keys(mainToolCall.function.arguments);
926
935
  const hasSlots = slots && slots.length > 0;
927
936
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
928
937
  if (hasSlots) {
@@ -937,7 +946,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
937
946
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
938
947
  });
939
948
  }
940
- (_q = api.logDebugMessage) === null || _q === void 0 ? void 0 : _q.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
949
+ (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
941
950
  }
942
951
  if (toolChild) {
943
952
  api.setNextNode(toolChild.id);
@@ -962,11 +971,11 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
962
971
  // we stringify objects (e.g. results coming from JSON Mode)
963
972
  // so that the transcript only contains text
964
973
  const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
965
- yield ((_r = api.output) === null || _r === void 0 ? void 0 : _r.call(api, resultToOutput, {}));
974
+ yield ((_t = api.output) === null || _t === void 0 ? void 0 : _t.call(api, resultToOutput, {}));
966
975
  }
967
976
  else if (llmResult.finishReason && llmPromptOptions.stream) {
968
977
  // send the finishReason as last output for a stream
969
- (_s = api.output) === null || _s === void 0 ? void 0 : _s.call(api, "", {
978
+ (_u = api.output) === null || _u === void 0 ? void 0 : _u.call(api, "", {
970
979
  _cognigy: {
971
980
  _preventTranscript: true,
972
981
  _messageId,
@@ -989,7 +998,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
989
998
  }
990
999
  // Add response to Cognigy Input/Context for further usage
991
1000
  if (storeLocation === "context") {
992
- (_t = api.addToContext) === null || _t === void 0 ? void 0 : _t.call(api, contextKey, llmResult, "simple");
1001
+ (_v = api.addToContext) === null || _v === void 0 ? void 0 : _v.call(api, contextKey, llmResult, "simple");
993
1002
  }
994
1003
  else if (storeLocation === "input") {
995
1004
  api.addToInput(inputKey, llmResult);
@@ -1002,19 +1011,19 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
1002
1011
  const errorDetailsBase = {
1003
1012
  name: error === null || error === void 0 ? void 0 : error.name,
1004
1013
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1005
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_u = error.originalErrorDetails) === null || _u === void 0 ? void 0 : _u.message),
1014
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_w = error.originalErrorDetails) === null || _w === void 0 ? void 0 : _w.message),
1006
1015
  };
1007
1016
  const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
1008
1017
  // return the requestId if it exist in the error obj.
1009
- if ((_v = error.meta) === null || _v === void 0 ? void 0 : _v.requestId) {
1018
+ if ((_x = error.meta) === null || _x === void 0 ? void 0 : _x.requestId) {
1010
1019
  errorDetails["meta"] = {
1011
- requestId: (_w = error.meta) === null || _w === void 0 ? void 0 : _w.requestId
1020
+ requestId: (_y = error.meta) === null || _y === void 0 ? void 0 : _y.requestId
1012
1021
  };
1013
1022
  }
1014
1023
  if (logErrorToSystem) {
1015
- (_x = api.log) === null || _x === void 0 ? void 0 : _x.call(api, "error", JSON.stringify(errorDetailsBase));
1024
+ (_z = api.log) === null || _z === void 0 ? void 0 : _z.call(api, "error", JSON.stringify(errorDetailsBase));
1016
1025
  }
1017
- (_y = api.logDebugError) === null || _y === void 0 ? void 0 : _y.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
1026
+ (_0 = api.logDebugError) === null || _0 === void 0 ? void 0 : _0.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
1018
1027
  yield handleServiceError(errorDetails);
1019
1028
  return;
1020
1029
  }
@@ -1,5 +1,15 @@
1
1
  /* Custom modules */
2
2
  import { createNodeDescriptor } from "../../../createNodeDescriptor";
3
+ export const LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2 = {
4
+ type: "mcp_oauth2",
5
+ label: "UI__NODE_EDITOR__MCP_OAUTH2_CONNECTION__LABEL",
6
+ fields: [
7
+ { fieldName: "oAuth2Url", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_URL" },
8
+ { fieldName: "oAuth2ClientId", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_ID" },
9
+ { fieldName: "oAuth2ClientSecret", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_SECRET" },
10
+ { fieldName: "oAuth2Scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" }
11
+ ]
12
+ };
3
13
  export const LLM_PROMPT_MCP_TOOL = createNodeDescriptor({
4
14
  type: "llmPromptMCPTool",
5
15
  defaultLabel: "MCP Tool",
@@ -167,8 +177,53 @@ export const LLM_PROMPT_MCP_TOOL = createNodeDescriptor({
167
177
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
168
178
  defaultValue: "{}",
169
179
  },
180
+ {
181
+ key: "oAuth2Connection",
182
+ type: "connection",
183
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__OAUTH2_CONNECTION__LABEL",
184
+ params: {
185
+ connectionType: LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2.type
186
+ },
187
+ condition: {
188
+ key: "authType",
189
+ value: "oAuth2"
190
+ }
191
+ },
192
+ {
193
+ key: "authType",
194
+ type: "select",
195
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__LABEL",
196
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__DESCRIPTION",
197
+ defaultValue: "none",
198
+ params: {
199
+ required: true,
200
+ options: [
201
+ {
202
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__NONE__LABEL",
203
+ value: "none"
204
+ },
205
+ {
206
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__OAUTH2__LABEL",
207
+ value: "oAuth2"
208
+ }
209
+ ]
210
+ },
211
+ resetOption: {
212
+ lookupValue: "none",
213
+ fieldsToReset: ["oAuth2Connection"]
214
+ }
215
+ },
170
216
  ],
171
217
  sections: [
218
+ {
219
+ key: "auth",
220
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__SECTIONS__AUTHENTICATION__LABEL",
221
+ defaultCollapsed: true,
222
+ fields: [
223
+ "authType",
224
+ "oAuth2Connection"
225
+ ]
226
+ },
172
227
  {
173
228
  key: "debugging",
174
229
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",
@@ -188,6 +243,7 @@ export const LLM_PROMPT_MCP_TOOL = createNodeDescriptor({
188
243
  { type: "field", key: "mcpWarning" },
189
244
  { type: "field", key: "mcpServerUrl" },
190
245
  { type: "field", key: "timeout" },
246
+ { type: "section", key: "auth" },
191
247
  { type: "section", key: "debugging" },
192
248
  { type: "section", key: "advanced" },
193
249
  ],
@@ -175,10 +175,10 @@ class SessionConfigMapper extends BaseMapper {
175
175
  return synthesizer;
176
176
  }
177
177
  buildRecognizer(sessionParams, stt, vad, azureConfig) {
178
- var _a, _b, _c;
178
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
179
179
  const { recognizer: sessionParamsRecognizer } = sessionParams || {};
180
- const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
181
- const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, sttDeepgramModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [] } = stt || {};
180
+ const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, deepgramfluxOptions: spDeepgramfluxOptions, speechmaticsOptions: spSpeechmaticsOptions, openaiOptions: spOpenaiOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
181
+ const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, sttModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [], speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue } = stt || {};
182
182
  const recognizer = {};
183
183
  recognizer.language = spLanguage || sttLanguage || undefined;
184
184
  recognizer.hints = spHints || sttHints || undefined;
@@ -213,7 +213,7 @@ class SessionConfigMapper extends BaseMapper {
213
213
  * ssc node: 'deepgramEndpointing' => boolean, 'deepgramEndpointingValue' => number
214
214
  */
215
215
  const isDeepgramEndpointingEnabled = (_a = (typeof spEndpointing === "number" || deepgramEndpointing)) !== null && _a !== void 0 ? _a : false;
216
- recognizer.model = spModel || sttDeepgramModel;
216
+ recognizer.model = spModel || sttModel;
217
217
  const deepgramOptions = {
218
218
  endpointing: false,
219
219
  punctuate: recognizer.punctuation,
@@ -226,6 +226,58 @@ class SessionConfigMapper extends BaseMapper {
226
226
  }
227
227
  recognizer.deepgramOptions = deepgramOptions;
228
228
  }
229
+ if (recognizer.vendor === 'deepgramflux') {
230
+ const { endpointing: spEndpointing, } = spDeepgramfluxOptions || {};
231
+ /*
232
+ * session params: 'endpointing' is a boolean
233
+ * ssc node: 'deepgramfluxEndpointing' => boolean, 'deepgramfluxEndOfTurnThreshold' => number, 'deepgramfluxEndOfTurnTimeoutMs' => number
234
+ */
235
+ const isDeepgramfluxEndpointingEnabled = (_d = (typeof spEndpointing === "number" || deepgramfluxEndpointing)) !== null && _d !== void 0 ? _d : false;
236
+ const deepgramfluxOptions = {
237
+ endpointing: isDeepgramfluxEndpointingEnabled,
238
+ };
239
+ if (isDeepgramfluxEndpointingEnabled) {
240
+ deepgramfluxOptions.endOfTurnThreshold = deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7;
241
+ deepgramfluxOptions.endOfTurnTimeoutMs = deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000;
242
+ }
243
+ recognizer.deepgramfluxOptions = deepgramfluxOptions;
244
+ }
245
+ if (recognizer.vendor === 'speechmatics') {
246
+ const { endpointing: spEndpointing, } = spSpeechmaticsOptions || {};
247
+ /*
248
+ * session params: 'endpointing' is a number (milliseconds)
249
+ * ssc node: 'speechmaticsEndpointing' => boolean, 'speechmaticsEndpointingValue' => number
250
+ */
251
+ const isSpeechmaticsEndpointingEnabled = (_e = (typeof spEndpointing === "number" || speechmaticsEndpointing)) !== null && _e !== void 0 ? _e : false;
252
+ const speechmaticsOptions = {
253
+ transcription_config: {},
254
+ };
255
+ if (isSpeechmaticsEndpointingEnabled) {
256
+ speechmaticsOptions.endpointing = (_f = (spEndpointing || speechmaticsEndpointingValue)) !== null && _f !== void 0 ? _f : 500;
257
+ }
258
+ // When endpointing is disabled, simply don't include the property
259
+ // (the feature-server will use its default SPEECHMATICS_END_OF_UTTERANCE_SILENCE_DURATION_MS)
260
+ recognizer.speechmaticsOptions = speechmaticsOptions;
261
+ }
262
+ if (recognizer.vendor === 'openai') {
263
+ const openaiModel = spModel || sttModel;
264
+ if (openaiModel) {
265
+ recognizer.openaiOptions = Object.assign(Object.assign({}, ((_g = recognizer.openaiOptions) !== null && _g !== void 0 ? _g : {})), { model: openaiModel });
266
+ }
267
+ const { endpointing: spEndpointing, } = spOpenaiOptions || {};
268
+ /*
269
+ * session params: 'endpointing' is a number (milliseconds)
270
+ * ssc node: 'openaiEndpointing' => boolean, 'openaiEndpointingValue' => number
271
+ */
272
+ const isOpenaiEndpointingEnabled = (_h = (typeof spEndpointing === "number" || openaiEndpointing)) !== null && _h !== void 0 ? _h : false;
273
+ const openaiOptions = Object.assign({}, ((_j = recognizer.openaiOptions) !== null && _j !== void 0 ? _j : {}));
274
+ if (isOpenaiEndpointingEnabled) {
275
+ openaiOptions.endpointing = (_k = (spEndpointing || openaiEndpointingValue)) !== null && _k !== void 0 ? _k : 500;
276
+ }
277
+ // When endpointing is disabled, simply don't include the property
278
+ // (the feature-server will use its default OPENAI_TURN_DETECTION_SILENCE_DURATION_MS)
279
+ recognizer.openaiOptions = openaiOptions;
280
+ }
229
281
  }
230
282
  if (this.has(spVad) || this.has(vad)) {
231
283
  const { enable: spEnable, mode: spMode, voiceMs: spVoiceMs } = spVad || {};
@@ -292,7 +344,7 @@ class SessionConfigMapper extends BaseMapper {
292
344
  }
293
345
  const mapper = new SessionConfigMapper("voiceGateway2");
294
346
  export function voiceConfigParamsToVoiceSettings(config, api) {
295
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o;
347
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
296
348
  let voiceSettings = {};
297
349
  if (config.sttVendor === 'none') {
298
350
  delete config.sttVendor;
@@ -329,25 +381,33 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
329
381
  hints = [...hints, ...config.sttHints];
330
382
  }
331
383
  const deepgramEndpointing = (_d = config.deepgramEndpointing) !== null && _d !== void 0 ? _d : true;
384
+ const deepgramfluxEndpointing = (_e = config.deepgramfluxEndpointing) !== null && _e !== void 0 ? _e : true;
332
385
  const deepgramShortUtterance = deepgramEndpointing ? false : true;
333
386
  // stt (recognizer)
334
387
  voiceSettings.stt = {
335
388
  sttVendor: config.sttVendor,
336
389
  sttLanguage: config.sttLanguage,
337
390
  sttHints: hints,
338
- sttLabel: (_e = config.sttLabel) !== null && _e !== void 0 ? _e : undefined,
391
+ sttLabel: (_f = config.sttLabel) !== null && _f !== void 0 ? _f : undefined,
339
392
  sttDisablePunctuation: config.sttDisablePunctuation !== undefined &&
340
393
  config.sttDisablePunctuation !== null
341
394
  ? !config.sttDisablePunctuation
342
395
  : undefined,
343
- googleModel: (_f = config.googleModel) !== null && _f !== void 0 ? _f : undefined,
396
+ googleModel: (_g = config.googleModel) !== null && _g !== void 0 ? _g : undefined,
344
397
  /* by default we enable endpointing - it is only undefined via SAP */
345
398
  deepgramEndpointing,
346
- deepgramEndpointingValue: (_g = config.deepgramEndpointingValue) !== null && _g !== void 0 ? _g : 250,
347
- sttDeepgramModel: (_h = config.sttDeepgramModel) !== null && _h !== void 0 ? _h : "nova-2",
348
- deepgramSmartFormatting: (_j = config.deepgramSmartFormatting) !== null && _j !== void 0 ? _j : undefined,
399
+ deepgramEndpointingValue: (_h = config.deepgramEndpointingValue) !== null && _h !== void 0 ? _h : 250,
400
+ sttModel: config.sttModel || "",
401
+ deepgramfluxEndpointing,
402
+ deepgramfluxEndOfTurnThreshold: (_j = config.deepgramfluxEndOfTurnThreshold) !== null && _j !== void 0 ? _j : 0.7,
403
+ deepgramfluxEndOfTurnTimeoutMs: (_k = config.deepgramfluxEndOfTurnTimeoutMs) !== null && _k !== void 0 ? _k : 5000,
404
+ deepgramSmartFormatting: (_l = config.deepgramSmartFormatting) !== null && _l !== void 0 ? _l : undefined,
349
405
  deepgramShortUtterance,
350
- listenDuringPrompt: (_k = config.sttListenDuringPrompt) !== null && _k !== void 0 ? _k : undefined,
406
+ speechmaticsEndpointing: (_m = config.speechmaticsEndpointing) !== null && _m !== void 0 ? _m : true,
407
+ speechmaticsEndpointingValue: (_o = config.speechmaticsEndpointingValue) !== null && _o !== void 0 ? _o : 500,
408
+ openaiEndpointing: (_p = config.openaiEndpointing) !== null && _p !== void 0 ? _p : true,
409
+ openaiEndpointingValue: (_q = config.openaiEndpointingValue) !== null && _q !== void 0 ? _q : 500,
410
+ listenDuringPrompt: (_r = config.sttListenDuringPrompt) !== null && _r !== void 0 ? _r : undefined,
351
411
  };
352
412
  // tts (synthesizer)
353
413
  voiceSettings.tts = {
@@ -392,7 +452,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
392
452
  voiceSettings.stt.altLanguages = [];
393
453
  }
394
454
  }
395
- if (config.ttsVendor === "elevenlabs") {
455
+ if (config.ttsVendor === "elevenlabs" || config.ttsVendor === "openai") {
396
456
  voiceSettings.tts.ttsModel = config.ttsModel;
397
457
  }
398
458
  // userNoInput
@@ -414,7 +474,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
414
474
  flowNoInputFail: config.flowNoInputFail
415
475
  };
416
476
  // Check if userNoInputTimeout has a value and userNoInputTimeoutEnable is null or undefined to cover generic nodes
417
- if (((_l = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _l === void 0 ? void 0 : _l.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
477
+ if (((_s = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _s === void 0 ? void 0 : _s.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
418
478
  voiceSettings.userNoInput.userNoInputTimeoutEnable = true;
419
479
  }
420
480
  voiceSettings.dtmf = {
@@ -422,7 +482,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
422
482
  dtmfInterDigitTimeout: config.dtmfInterDigitTimeout,
423
483
  dtmfMaxDigits: config.dtmfMaxDigits,
424
484
  dtmfMinDigits: config.dtmfMinDigits,
425
- dtmfSubmitDigit: (_m = config.dtmfSubmitDigit) === null || _m === void 0 ? void 0 : _m.trim(),
485
+ dtmfSubmitDigit: (_t = config.dtmfSubmitDigit) === null || _t === void 0 ? void 0 : _t.trim(),
426
486
  };
427
487
  if (config === null || config === void 0 ? void 0 : config.dtmfEnable) {
428
488
  if (voiceSettings.dtmf.dtmfSubmitDigit &&
@@ -459,7 +519,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
459
519
  }
460
520
  // atmosphere sounds
461
521
  if (config.atmosphereAction) {
462
- if ((_o = config.atmosphereUrl) === null || _o === void 0 ? void 0 : _o.length) {
522
+ if ((_u = config.atmosphereUrl) === null || _u === void 0 ? void 0 : _u.length) {
463
523
  if (!isValidUrl(config.atmosphereUrl)) {
464
524
  throw new Error(`Audio file URL is invalid ${config.atmosphereUrl}`);
465
525
  }
@@ -2,7 +2,7 @@
2
2
  import { cleanTarget } from "../../../descriptors/voicegateway2/utils/helper";
3
3
  import { isValidUrl, isValidPhoneNumber } from "../utils/helper";
4
4
  export const transfer = {
5
- handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
5
+ handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
6
6
  try {
7
7
  switch (endpointType) {
8
8
  case "bandwidth":
@@ -21,14 +21,15 @@ export const transfer = {
21
21
  return this.handleAudioCodesInput(prepareTransferParams(params), endpointType);
22
22
  case "voiceGateway2":
23
23
  default:
24
- return this.handleVGInput(prepareTransferParams(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia);
24
+ return this.handleVGInput(prepareTransferParams(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia);
25
25
  }
26
26
  }
27
27
  catch (error) {
28
28
  throw Error(error.message);
29
29
  }
30
30
  },
31
- handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
31
+ handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
32
+ var _a;
32
33
  const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, timeLimit, sttLabel } = transferParam;
33
34
  const payload = {
34
35
  _voiceGateway2: {
@@ -119,14 +120,35 @@ export const transfer = {
119
120
  recognizer.model = googleModel;
120
121
  }
121
122
  if (recognizer.vendor === 'deepgram') {
122
- recognizer.model = sttDeepgramModel;
123
+ recognizer.model = sttModel;
123
124
  recognizer.deepgramOptions = {
124
- model: sttDeepgramModel,
125
+ model: sttModel,
125
126
  punctuate: !sttDisablePunctuation,
126
127
  endpointing: deepgramEndpointing ? deepgramEndpointingValue : false,
127
128
  smartFormatting: deepgramSmartFormatting !== null && deepgramSmartFormatting !== void 0 ? deepgramSmartFormatting : false
128
129
  };
129
130
  }
131
+ if (recognizer.vendor === 'deepgramflux') {
132
+ recognizer.deepgramfluxOptions = {
133
+ endpointing: deepgramfluxEndpointing || true,
134
+ endOfTurnThreshold: deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7,
135
+ endOfTurnTimeoutMs: deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000
136
+ };
137
+ }
138
+ if (recognizer.vendor === 'speechmatics') {
139
+ recognizer.speechmaticsOptions = {
140
+ transcription_config: {},
141
+ };
142
+ if (speechmaticsEndpointing) {
143
+ recognizer.speechmaticsOptions.endpointing = speechmaticsEndpointingValue !== null && speechmaticsEndpointingValue !== void 0 ? speechmaticsEndpointingValue : 500;
144
+ }
145
+ }
146
+ if (recognizer.vendor === 'openai') {
147
+ recognizer.openaiOptions = Object.assign({}, ((_a = recognizer.openaiOptions) !== null && _a !== void 0 ? _a : {}));
148
+ if (openaiEndpointing) {
149
+ recognizer.openaiOptions.endpointing = openaiEndpointingValue !== null && openaiEndpointingValue !== void 0 ? openaiEndpointingValue : 500;
150
+ }
151
+ }
130
152
  if (sttLabel) {
131
153
  recognizer.label = sttLabel;
132
154
  }
@@ -125,6 +125,64 @@ export const voiceConfigFields = [
125
125
  value: "deepgram"
126
126
  }
127
127
  },
128
+ {
129
+ key: "deepgramfluxEndpointing",
130
+ type: "toggle",
131
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
132
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
133
+ defaultValue: true,
134
+ condition: {
135
+ key: "sttVendor",
136
+ value: "deepgramflux"
137
+ }
138
+ },
139
+ {
140
+ key: "deepgramfluxEndOfTurnThreshold",
141
+ type: "slider",
142
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
143
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
144
+ defaultValue: 0.7,
145
+ params: {
146
+ min: 0.5,
147
+ max: 0.9,
148
+ step: 0.1
149
+ },
150
+ condition: {
151
+ and: [
152
+ {
153
+ key: "sttVendor",
154
+ value: "deepgramflux"
155
+ },
156
+ {
157
+ key: "deepgramfluxEndpointing",
158
+ value: true
159
+ },
160
+ ]
161
+ }
162
+ },
163
+ {
164
+ key: "deepgramfluxEndOfTurnTimeoutMs",
165
+ type: "number",
166
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
167
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
168
+ defaultValue: 5000,
169
+ params: {
170
+ min: 500,
171
+ max: 10000
172
+ },
173
+ condition: {
174
+ and: [
175
+ {
176
+ key: "sttVendor",
177
+ value: "deepgramflux"
178
+ },
179
+ {
180
+ key: "deepgramfluxEndpointing",
181
+ value: true
182
+ },
183
+ ]
184
+ }
185
+ },
128
186
  {
129
187
  key: "enableAdvancedSTTConfig",
130
188
  type: "toggle",
@@ -262,7 +320,7 @@ export const voiceConfigFields = [
262
320
  defaultValue: "",
263
321
  params: {
264
322
  languageKey: "config.sttLanguage",
265
- modelKey: "config.sttDeepgramModel",
323
+ modelKey: "config.sttModel",
266
324
  }
267
325
  },
268
326
  {
@@ -273,7 +331,7 @@ export const voiceConfigFields = [
273
331
  defaultValue: "",
274
332
  },
275
333
  {
276
- key: "sttDeepgramModel",
334
+ key: "sttModel",
277
335
  type: "sttSelect",
278
336
  label: "_unused_",
279
337
  description: "_unused_",
@@ -303,6 +361,13 @@ export const SESSION_SPEECH_PARAMETERS = createNodeDescriptor({
303
361
  "deepgramEndpointing",
304
362
  "deepgramEndpointingValue",
305
363
  "deepgramSmartFormatting",
364
+ "deepgramfluxEndpointing",
365
+ "deepgramfluxEndOfTurnThreshold",
366
+ "deepgramfluxEndOfTurnTimeoutMs",
367
+ "speechmaticsEndpointing",
368
+ "speechmaticsEndpointingValue",
369
+ "openaiEndpointing",
370
+ "openaiEndpointingValue",
306
371
  "sttHints",
307
372
  "sttHintsDynamicHints",
308
373
  "googleModel",
@@ -71,6 +71,13 @@ export const playNode = createNodeDescriptor({
71
71
  "deepgramEndpointing",
72
72
  "deepgramEndpointingValue",
73
73
  "deepgramSmartFormatting",
74
+ "deepgramfluxEndpointing",
75
+ "deepgramfluxEndOfTurnThreshold",
76
+ "deepgramfluxEndOfTurnTimeoutMs",
77
+ "speechmaticsEndpointing",
78
+ "speechmaticsEndpointingValue",
79
+ "openaiEndpointing",
80
+ "openaiEndpointingValue",
74
81
  "sttDisablePunctuation",
75
82
  "sttVadEnabled",
76
83
  "sttVadMode",