@cognigy/rest-api-client 2025.25.0 → 2026.2.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. package/CHANGELOG.md +10 -0
  2. package/README.md +15 -0
  3. package/build/apigroups/InsightsAPIGroup_2_1.js +27 -0
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +140 -375
  5. package/build/apigroups/SimulationAPIGroup_2_0.js +33 -18
  6. package/build/apigroups/aiAgentsV2/agent.js +3 -0
  7. package/build/apigroups/aiAgentsV2/agentAPI.js +38 -0
  8. package/build/apigroups/aiAgentsV2/agentPersona.js +3 -0
  9. package/build/apigroups/aiAgentsV2/agentPersonaAPI.js +38 -0
  10. package/build/apigroups/aiAgentsV2/tool.js +3 -0
  11. package/build/apigroups/aiAgentsV2/toolAPI.js +35 -0
  12. package/build/apigroups/aiAgentsV2/toolDescriptor.js +3 -0
  13. package/build/apigroups/aiAgentsV2/toolDescriptorAPI.js +13 -0
  14. package/build/apigroups/index.js +3 -1
  15. package/build/authentication/AuthenticationAPI.js +1 -0
  16. package/build/shared/charts/createNodeDescriptor.js +1 -0
  17. package/build/shared/charts/descriptors/analytics/overwriteAnalytics.js +14 -0
  18. package/build/shared/charts/descriptors/analytics/updateProfile.js +5 -0
  19. package/build/shared/charts/descriptors/connectionNodes/smtp/emailNotification.js +7 -0
  20. package/build/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  21. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +15 -0
  22. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +13 -0
  23. package/build/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +63 -10
  24. package/build/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +52 -0
  25. package/build/shared/charts/descriptors/connectionNodes/speechProviders/index.js +8 -7
  26. package/build/shared/charts/descriptors/index.js +6 -0
  27. package/build/shared/charts/descriptors/message/question/question.js +254 -59
  28. package/build/shared/charts/descriptors/message/say.js +3 -0
  29. package/build/shared/charts/descriptors/service/agentTools/executeWorkflowTool.js +239 -0
  30. package/build/shared/charts/descriptors/service/agentTools/handoverToHumanAgentTool.js +783 -0
  31. package/build/shared/charts/descriptors/service/agentTools/sendEmailTool.js +33 -4
  32. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +25 -20
  33. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +10 -6
  34. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +57 -1
  35. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +10 -1
  36. package/build/shared/charts/descriptors/service/aiAgent/helpers/parseMcpHeaders.js +26 -0
  37. package/build/shared/charts/descriptors/service/aiAgentV2.js +89 -0
  38. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  39. package/build/shared/charts/descriptors/service/httpRequest.js +3 -0
  40. package/build/shared/charts/descriptors/service/index.js +9 -1
  41. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +26 -17
  42. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +57 -1
  43. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +75 -15
  44. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +27 -5
  45. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +67 -2
  46. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  47. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +149 -4
  48. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +137 -4
  49. package/build/shared/errors/ErrorCode.js +2 -1
  50. package/build/shared/errors/ErrorCollection.js +1 -0
  51. package/build/shared/helper/BaseContext.js +1 -1
  52. package/build/shared/interfaces/amqpInterface.js +1 -0
  53. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  54. package/build/shared/interfaces/handover.js +1 -0
  55. package/build/shared/interfaces/handoverProviders.js +0 -1
  56. package/build/shared/interfaces/messageAPI/endpoints.js +4 -1
  57. package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
  58. package/build/shared/interfaces/resources/IChart.js +10 -1
  59. package/build/shared/interfaces/resources/IChartNode.js +32 -4
  60. package/build/shared/interfaces/resources/IEndpoint.js +1 -0
  61. package/build/shared/interfaces/resources/INodeDescriptorSet.js +8 -0
  62. package/build/shared/interfaces/resources/TResourceType.js +1 -0
  63. package/build/shared/interfaces/resources/chart/IChartExecutableNode.js +10 -1
  64. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeConnector.js +49 -0
  65. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  66. package/build/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  67. package/build/shared/interfaces/restAPI/administration/user/v2.0/IExchangeCXoneTokenRest_2_0.js +3 -0
  68. package/build/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +3 -0
  69. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/ICreateKnowledgeConnectorRest_2_0.js +3 -0
  70. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IDeleteKnowledgeConnectorRest_2_0.js +3 -0
  71. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IIndexKnowledgeConnectorsRest_2_0.js +3 -0
  72. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IKnowledgeConnector_2_0.js +3 -0
  73. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IReadKnowledgeConnectorRest_2_0.js +3 -0
  74. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +3 -0
  75. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IUpdateKnowledgeConnectorRest_2_0.js +3 -0
  76. package/build/shared/interfaces/restAPI/simulation/scheduler/ICreateSchedulerRest_2_0.js +3 -0
  77. package/build/shared/interfaces/restAPI/simulation/scheduler/IGetSchedulerRest_2_0.js +3 -0
  78. package/build/shared/interfaces/restAPI/simulation/scheduler/ISchedulerRest_2_0.js +12 -0
  79. package/build/shared/interfaces/restAPI/simulation/scheduler/IUpdateSchedulerRest_2_0.js +3 -0
  80. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +3 -0
  81. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +3 -0
  82. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +3 -0
  83. package/build/shared/interfaces/security/ISessionScope.js +3 -0
  84. package/build/spec/aiAgentV2.spec.js +564 -0
  85. package/dist/esm/apigroups/InsightsAPIGroup_2_1.js +13 -0
  86. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +140 -375
  87. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +33 -18
  88. package/dist/esm/apigroups/aiAgentsV2/agent.js +2 -0
  89. package/dist/esm/apigroups/aiAgentsV2/agentAPI.js +24 -0
  90. package/dist/esm/apigroups/aiAgentsV2/agentPersona.js +2 -0
  91. package/dist/esm/apigroups/aiAgentsV2/agentPersonaAPI.js +24 -0
  92. package/dist/esm/apigroups/aiAgentsV2/aiAgentV2API.js +2 -0
  93. package/dist/esm/apigroups/aiAgentsV2/tool.js +2 -0
  94. package/dist/esm/apigroups/aiAgentsV2/toolAPI.js +21 -0
  95. package/dist/esm/apigroups/aiAgentsV2/toolDescriptor.js +2 -0
  96. package/dist/esm/apigroups/aiAgentsV2/toolDescriptorAPI.js +9 -0
  97. package/dist/esm/apigroups/index.js +1 -0
  98. package/dist/esm/authentication/AuthenticationAPI.js +1 -0
  99. package/dist/esm/shared/charts/createNodeDescriptor.js +1 -0
  100. package/dist/esm/shared/charts/descriptors/analytics/overwriteAnalytics.js +14 -0
  101. package/dist/esm/shared/charts/descriptors/analytics/updateProfile.js +5 -0
  102. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/emailNotification.js +7 -0
  103. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  104. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +12 -0
  105. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +10 -0
  106. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +63 -10
  107. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +49 -0
  108. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/index.js +3 -3
  109. package/dist/esm/shared/charts/descriptors/index.js +7 -1
  110. package/dist/esm/shared/charts/descriptors/message/question/question.js +254 -59
  111. package/dist/esm/shared/charts/descriptors/message/say.js +3 -0
  112. package/dist/esm/shared/charts/descriptors/service/agentTools/executeWorkflowTool.js +237 -0
  113. package/dist/esm/shared/charts/descriptors/service/agentTools/handoverToHumanAgentTool.js +770 -0
  114. package/dist/esm/shared/charts/descriptors/service/agentTools/sendEmailTool.js +33 -4
  115. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +25 -20
  116. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +10 -6
  117. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +56 -0
  118. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +10 -1
  119. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/parseMcpHeaders.js +25 -0
  120. package/dist/esm/shared/charts/descriptors/service/aiAgentV2.js +87 -0
  121. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  122. package/dist/esm/shared/charts/descriptors/service/httpRequest.js +3 -0
  123. package/dist/esm/shared/charts/descriptors/service/index.js +4 -0
  124. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +33 -24
  125. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +56 -0
  126. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +75 -15
  127. package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +27 -5
  128. package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +67 -2
  129. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  130. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +149 -4
  131. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +137 -4
  132. package/dist/esm/shared/errors/ErrorCode.js +2 -1
  133. package/dist/esm/shared/errors/ErrorCollection.js +1 -0
  134. package/dist/esm/shared/helper/BaseContext.js +1 -1
  135. package/dist/esm/shared/interfaces/amqpInterface.js +1 -0
  136. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  137. package/dist/esm/shared/interfaces/handover.js +1 -0
  138. package/dist/esm/shared/interfaces/handoverProviders.js +0 -1
  139. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +4 -1
  140. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +1 -0
  141. package/dist/esm/shared/interfaces/resources/IChart.js +10 -1
  142. package/dist/esm/shared/interfaces/resources/IChartNode.js +32 -4
  143. package/dist/esm/shared/interfaces/resources/IEndpoint.js +1 -0
  144. package/dist/esm/shared/interfaces/resources/INodeDescriptorSet.js +8 -0
  145. package/dist/esm/shared/interfaces/resources/TResourceType.js +1 -0
  146. package/dist/esm/shared/interfaces/resources/chart/IChartExecutableNode.js +10 -1
  147. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeConnector.js +46 -0
  148. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  149. package/dist/esm/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  150. package/dist/esm/shared/interfaces/restAPI/administration/user/v2.0/IExchangeCXoneTokenRest_2_0.js +2 -0
  151. package/dist/esm/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +2 -0
  152. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/ICreateKnowledgeConnectorRest_2_0.js +2 -0
  153. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IDeleteKnowledgeConnectorRest_2_0.js +2 -0
  154. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IIndexKnowledgeConnectorsRest_2_0.js +2 -0
  155. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IKnowledgeConnector_2_0.js +2 -0
  156. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IReadKnowledgeConnectorRest_2_0.js +2 -0
  157. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +2 -0
  158. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IUpdateKnowledgeConnectorRest_2_0.js +2 -0
  159. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/ICreateSchedulerRest_2_0.js +2 -0
  160. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/IGetSchedulerRest_2_0.js +2 -0
  161. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/ISchedulerRest_2_0.js +9 -0
  162. package/dist/esm/shared/interfaces/restAPI/simulation/scheduler/IUpdateSchedulerRest_2_0.js +2 -0
  163. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +2 -0
  164. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +2 -0
  165. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +2 -0
  166. package/dist/esm/shared/interfaces/security/ISessionScope.js +2 -0
  167. package/dist/esm/spec/aiAgentV2.spec.js +563 -0
  168. package/package.json +6 -3
  169. package/types/index.d.ts +1198 -44
package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js
@@ -18,6 +18,7 @@ const crypto = require("crypto");
  const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
  const logic_1 = require("../../logic");
  const createToolDefinitions_1 = require("../aiAgent/helpers/createToolDefinitions");
+ const parseMcpHeaders_1 = require("../aiAgent/helpers/parseMcpHeaders");
  const prompt_1 = require("../../nlu/generativeSlotFiller/prompt");
  const errors_1 = require("../../../../errors");
  const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
@@ -29,7 +30,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  collapsable: true,
  placement: {
  children: {
- whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool", "knowledgeTool", "handoverToAiAgentTool", "sendEmailTool"],
+ whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool", "knowledgeTool", "handoverToAiAgentTool", "handoverToHumanAgentTool", "sendEmailTool", "executeWorkflowTool"],
  },
  },
  },
@@ -702,8 +703,11 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  color: "#252525",
  },
  tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
+ mocking: {
+ defaultMockCode: `input.llmResponse = {response: "Mock response"};`
+ },
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0;
  const { api, input, flowReferenceId } = cognigy;
  const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
  errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, loggingHeaders, conditionForLogging, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
@@ -907,13 +911,16 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  break;
  }
  }
+ let mcpHeaders = (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.mcpHeaders;
  if (!toolChild && toolMap.has(mainToolCall.function.name)) {
  // If the tool call is from an MCP tool, set the next node to the corresponding child node
  toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
  isMcpToolCall = true;
+ // Parse mcpHeaders values if present and resolve any Cognigy script expressions
+ mcpHeaders = await (0, parseMcpHeaders_1.parseMcpHeaders)(toolChild === null || toolChild === void 0 ? void 0 : toolChild.config.mcpHeaders, api);
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_g = api.logDebugError) === null || _g === void 0 ? void 0 : _g.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_h = api.logDebugError) === null || _h === void 0 ? void 0 : _h.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -921,21 +928,23 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_h = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _h === void 0 ? void 0 : _h.mcpServerUrl,
- mcpHeaders: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.mcpHeaders,
+ mcpServerUrl: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.mcpServerUrl,
+ mcpHeaders,
  timeout: (_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+ authType: (_l = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _l === void 0 ? void 0 : _l.authType,
+ oAuth2Connection: (_m = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _m === void 0 ? void 0 : _m.oAuth2Connection,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_m = (_l = input.llmPrompt) === null || _l === void 0 ? void 0 : _l.toolArgs) !== null && _m !== void 0 ? _m : {}), mainToolCall.function.arguments) });
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_p = (_o = input.llmPrompt) === null || _o === void 0 ? void 0 : _o.toolArgs) !== null && _p !== void 0 ? _p : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_o = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _o === void 0 ? void 0 : _o.debugMessage) {
+ if ((_q = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _q === void 0 ? void 0 : _q.debugMessage) {
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${await api.parseCognigyScriptText(toolChild.config.toolId)}`];
  // Arguments / Parameters Slots
- const slots = ((_p = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _p === void 0 ? void 0 : _p.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_r = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _r === void 0 ? void 0 : _r.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -950,7 +959,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_q = api.logDebugMessage) === null || _q === void 0 ? void 0 : _q.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -975,11 +984,11 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  // we stringify objects (e.g. results coming from JSON Mode)
  // so that the transcript only contains text
  const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
- await ((_r = api.output) === null || _r === void 0 ? void 0 : _r.call(api, resultToOutput, {}));
+ await ((_t = api.output) === null || _t === void 0 ? void 0 : _t.call(api, resultToOutput, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_s = api.output) === null || _s === void 0 ? void 0 : _s.call(api, "", {
+ (_u = api.output) === null || _u === void 0 ? void 0 : _u.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -1002,7 +1011,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_t = api.addToContext) === null || _t === void 0 ? void 0 : _t.call(api, contextKey, llmResult, "simple");
+ (_v = api.addToContext) === null || _v === void 0 ? void 0 : _v.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -1015,19 +1024,19 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const errorDetailsBase = {
  name: error === null || error === void 0 ? void 0 : error.name,
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_u = error.originalErrorDetails) === null || _u === void 0 ? void 0 : _u.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_w = error.originalErrorDetails) === null || _w === void 0 ? void 0 : _w.message),
  };
  const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
  // return the requestId if it exist in the error obj.
- if ((_v = error.meta) === null || _v === void 0 ? void 0 : _v.requestId) {
+ if ((_x = error.meta) === null || _x === void 0 ? void 0 : _x.requestId) {
  errorDetails["meta"] = {
- requestId: (_w = error.meta) === null || _w === void 0 ? void 0 : _w.requestId
+ requestId: (_y = error.meta) === null || _y === void 0 ? void 0 : _y.requestId
  };
  }
  if (logErrorToSystem) {
- (_x = api.log) === null || _x === void 0 ? void 0 : _x.call(api, "error", JSON.stringify(errorDetailsBase));
+ (_z = api.log) === null || _z === void 0 ? void 0 : _z.call(api, "error", JSON.stringify(errorDetailsBase));
  }
- (_y = api.logDebugError) === null || _y === void 0 ? void 0 : _y.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ (_0 = api.logDebugError) === null || _0 === void 0 ? void 0 : _0.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
  await handleServiceError(errorDetails);
  return;
  }
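
The helper behind the new `parseMcpHeaders_1` import, package/build/shared/charts/descriptors/service/aiAgent/helpers/parseMcpHeaders.js (+26 lines), appears in the file list but not in the hunks; only its call site is visible here. A minimal sketch of what that call site implies, assuming the headers arrive as the node field's JSON string (default "{}") and that individual values may contain CognigyScript - only the helper's name and `api.parseCognigyScriptText` are taken from the diff:

```js
// Hypothetical reconstruction - inferred from the call site
// `await parseMcpHeaders(toolChild?.config.mcpHeaders, api)` and its comment
// "resolve any Cognigy script expressions"; the real body is not shown in this diff.
async function parseMcpHeaders(mcpHeaders, api) {
    if (!mcpHeaders) {
        return undefined;
    }
    // The MCP Tool headers field defaults to the string "{}", so accept both forms.
    const headers = typeof mcpHeaders === "string" ? JSON.parse(mcpHeaders) : mcpHeaders;
    const resolved = {};
    for (const [name, value] of Object.entries(headers)) {
        // Resolve CognigyScript expressions (e.g. "{{context.apiKey}}") per header value.
        resolved[name] = await api.parseCognigyScriptText(String(value));
    }
    return resolved;
}
```

Whatever the real implementation, the visible effect in the hunks above is that the session state now stores the resolved `mcpHeaders` instead of the raw `toolChild?.config.mcpHeaders` value.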
package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js
@@ -1,8 +1,18 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.LLM_PROMPT_MCP_TOOL = void 0;
+ exports.LLM_PROMPT_MCP_TOOL = exports.LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2 = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
+ exports.LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2 = {
+ type: "mcp_oauth2",
+ label: "UI__NODE_EDITOR__MCP_OAUTH2_CONNECTION__LABEL",
+ fields: [
+ { fieldName: "oAuth2Url", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_URL" },
+ { fieldName: "oAuth2ClientId", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_ID" },
+ { fieldName: "oAuth2ClientSecret", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_SECRET" },
+ { fieldName: "oAuth2Scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" }
+ ]
+ };
  exports.LLM_PROMPT_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "llmPromptMCPTool",
  defaultLabel: "MCP Tool",
@@ -170,8 +180,53 @@ exports.LLM_PROMPT_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)({
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
  defaultValue: "{}",
  },
+ {
+ key: "oAuth2Connection",
+ type: "connection",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__OAUTH2_CONNECTION__LABEL",
+ params: {
+ connectionType: exports.LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2.type
+ },
+ condition: {
+ key: "authType",
+ value: "oAuth2"
+ }
+ },
+ {
+ key: "authType",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__DESCRIPTION",
+ defaultValue: "none",
+ params: {
+ required: true,
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__NONE__LABEL",
+ value: "none"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__OAUTH2__LABEL",
+ value: "oAuth2"
+ }
+ ]
+ },
+ resetOption: {
+ lookupValue: "none",
+ fieldsToReset: ["oAuth2Connection"]
+ }
+ },
  ],
  sections: [
+ {
+ key: "auth",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__SECTIONS__AUTHENTICATION__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "authType",
+ "oAuth2Connection"
+ ]
+ },
  {
  key: "debugging",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",
@@ -191,6 +246,7 @@ exports.LLM_PROMPT_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "field", key: "mcpWarning" },
  { type: "field", key: "mcpServerUrl" },
  { type: "field", key: "timeout" },
+ { type: "section", key: "auth" },
  { type: "section", key: "debugging" },
  { type: "section", key: "advanced" },
  ],
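
The four fields of the new `mcp_oauth2` connection map onto a standard OAuth2 client-credentials exchange; how the runtime consumes them is outside this diff. A sketch of the conventional flow the field names suggest - the field names come from `LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2`, everything else below is illustrative:

```js
// Illustrative only - the diff defines the connection fields, not the token exchange.
async function fetchMcpAccessToken({ oAuth2Url, oAuth2ClientId, oAuth2ClientSecret, oAuth2Scope }) {
    const response = await fetch(oAuth2Url, {
        method: "POST",
        headers: { "Content-Type": "application/x-www-form-urlencoded" },
        body: new URLSearchParams({
            grant_type: "client_credentials",
            client_id: oAuth2ClientId,
            client_secret: oAuth2ClientSecret,
            scope: oAuth2Scope,
        }),
    });
    if (!response.ok) {
        throw new Error(`Token request failed with status ${response.status}`);
    }
    const { access_token } = await response.json();
    // Presumably attached as "Authorization: Bearer <token>" on calls to mcpServerUrl.
    return access_token;
}
```

Note the `resetOption` on `authType`: switching back to "none" clears `oAuth2Connection`, so a stale connection reference cannot linger in the node config.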
package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js
@@ -178,10 +178,10 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  return synthesizer;
  }
  buildRecognizer(sessionParams, stt, vad, azureConfig) {
- var _a, _b, _c;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
  const { recognizer: sessionParamsRecognizer } = sessionParams || {};
- const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
- const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, sttDeepgramModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [] } = stt || {};
+ const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, deepgramfluxOptions: spDeepgramfluxOptions, speechmaticsOptions: spSpeechmaticsOptions, openaiOptions: spOpenaiOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
+ const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, sttModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [], speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue } = stt || {};
  const recognizer = {};
  recognizer.language = spLanguage || sttLanguage || undefined;
  recognizer.hints = spHints || sttHints || undefined;
@@ -216,7 +216,7 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  * ssc node: 'deepgramEndpointing' => boolean, 'deepgramEndpointingValue' => number
  */
  const isDeepgramEndpointingEnabled = (_a = (typeof spEndpointing === "number" || deepgramEndpointing)) !== null && _a !== void 0 ? _a : false;
- recognizer.model = spModel || sttDeepgramModel;
+ recognizer.model = spModel || sttModel;
  const deepgramOptions = {
  endpointing: false,
  punctuate: recognizer.punctuation,
@@ -229,6 +229,58 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  }
  recognizer.deepgramOptions = deepgramOptions;
  }
+ if (recognizer.vendor === 'deepgramflux') {
+ const { endpointing: spEndpointing, } = spDeepgramfluxOptions || {};
+ /*
+ * session params: 'endpointing' is a boolean
+ * ssc node: 'deepgramfluxEndpointing' => boolean, 'deepgramfluxEndOfTurnThreshold' => number, 'deepgramfluxEndOfTurnTimeoutMs' => number
+ */
+ const isDeepgramfluxEndpointingEnabled = (_d = (typeof spEndpointing === "number" || deepgramfluxEndpointing)) !== null && _d !== void 0 ? _d : false;
+ const deepgramfluxOptions = {
+ endpointing: isDeepgramfluxEndpointingEnabled,
+ };
+ if (isDeepgramfluxEndpointingEnabled) {
+ deepgramfluxOptions.endOfTurnThreshold = deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7;
+ deepgramfluxOptions.endOfTurnTimeoutMs = deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000;
+ }
+ recognizer.deepgramfluxOptions = deepgramfluxOptions;
+ }
+ if (recognizer.vendor === 'speechmatics') {
+ const { endpointing: spEndpointing, } = spSpeechmaticsOptions || {};
+ /*
+ * session params: 'endpointing' is a number (milliseconds)
+ * ssc node: 'speechmaticsEndpointing' => boolean, 'speechmaticsEndpointingValue' => number
+ */
+ const isSpeechmaticsEndpointingEnabled = (_e = (typeof spEndpointing === "number" || speechmaticsEndpointing)) !== null && _e !== void 0 ? _e : false;
+ const speechmaticsOptions = {
+ transcription_config: {},
+ };
+ if (isSpeechmaticsEndpointingEnabled) {
+ speechmaticsOptions.endpointing = (_f = (spEndpointing || speechmaticsEndpointingValue)) !== null && _f !== void 0 ? _f : 500;
+ }
+ // When endpointing is disabled, simply don't include the property
+ // (the feature-server will use its default SPEECHMATICS_END_OF_UTTERANCE_SILENCE_DURATION_MS)
+ recognizer.speechmaticsOptions = speechmaticsOptions;
+ }
+ if (recognizer.vendor === 'openai') {
+ const openaiModel = spModel || sttModel;
+ if (openaiModel) {
+ recognizer.openaiOptions = Object.assign(Object.assign({}, ((_g = recognizer.openaiOptions) !== null && _g !== void 0 ? _g : {})), { model: openaiModel });
+ }
+ const { endpointing: spEndpointing, } = spOpenaiOptions || {};
+ /*
+ * session params: 'endpointing' is a number (milliseconds)
+ * ssc node: 'openaiEndpointing' => boolean, 'openaiEndpointingValue' => number
+ */
+ const isOpenaiEndpointingEnabled = (_h = (typeof spEndpointing === "number" || openaiEndpointing)) !== null && _h !== void 0 ? _h : false;
+ const openaiOptions = Object.assign({}, ((_j = recognizer.openaiOptions) !== null && _j !== void 0 ? _j : {}));
+ if (isOpenaiEndpointingEnabled) {
+ openaiOptions.endpointing = (_k = (spEndpointing || openaiEndpointingValue)) !== null && _k !== void 0 ? _k : 500;
+ }
+ // When endpointing is disabled, simply don't include the property
+ // (the feature-server will use its default OPENAI_TURN_DETECTION_SILENCE_DURATION_MS)
+ recognizer.openaiOptions = openaiOptions;
+ }
  }
  if (this.has(spVad) || this.has(vad)) {
  const { enable: spEnable, mode: spMode, voiceMs: spVoiceMs } = spVad || {};
@@ -295,7 +347,7 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  }
  const mapper = new SessionConfigMapper("voiceGateway2");
  function voiceConfigParamsToVoiceSettings(config, api) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
  let voiceSettings = {};
  if (config.sttVendor === 'none') {
  delete config.sttVendor;
@@ -332,25 +384,33 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  hints = [...hints, ...config.sttHints];
  }
  const deepgramEndpointing = (_d = config.deepgramEndpointing) !== null && _d !== void 0 ? _d : true;
+ const deepgramfluxEndpointing = (_e = config.deepgramfluxEndpointing) !== null && _e !== void 0 ? _e : true;
  const deepgramShortUtterance = deepgramEndpointing ? false : true;
  // stt (recognizer)
  voiceSettings.stt = {
  sttVendor: config.sttVendor,
  sttLanguage: config.sttLanguage,
  sttHints: hints,
- sttLabel: (_e = config.sttLabel) !== null && _e !== void 0 ? _e : undefined,
+ sttLabel: (_f = config.sttLabel) !== null && _f !== void 0 ? _f : undefined,
  sttDisablePunctuation: config.sttDisablePunctuation !== undefined &&
  config.sttDisablePunctuation !== null
  ? !config.sttDisablePunctuation
  : undefined,
- googleModel: (_f = config.googleModel) !== null && _f !== void 0 ? _f : undefined,
+ googleModel: (_g = config.googleModel) !== null && _g !== void 0 ? _g : undefined,
  /* by default we enable endpointing - it is only undefined via SAP */
  deepgramEndpointing,
- deepgramEndpointingValue: (_g = config.deepgramEndpointingValue) !== null && _g !== void 0 ? _g : 250,
- sttDeepgramModel: (_h = config.sttDeepgramModel) !== null && _h !== void 0 ? _h : "nova-2",
- deepgramSmartFormatting: (_j = config.deepgramSmartFormatting) !== null && _j !== void 0 ? _j : undefined,
+ deepgramEndpointingValue: (_h = config.deepgramEndpointingValue) !== null && _h !== void 0 ? _h : 250,
+ sttModel: config.sttModel || "",
+ deepgramfluxEndpointing,
+ deepgramfluxEndOfTurnThreshold: (_j = config.deepgramfluxEndOfTurnThreshold) !== null && _j !== void 0 ? _j : 0.7,
+ deepgramfluxEndOfTurnTimeoutMs: (_k = config.deepgramfluxEndOfTurnTimeoutMs) !== null && _k !== void 0 ? _k : 5000,
+ deepgramSmartFormatting: (_l = config.deepgramSmartFormatting) !== null && _l !== void 0 ? _l : undefined,
  deepgramShortUtterance,
- listenDuringPrompt: (_k = config.sttListenDuringPrompt) !== null && _k !== void 0 ? _k : undefined,
+ speechmaticsEndpointing: (_m = config.speechmaticsEndpointing) !== null && _m !== void 0 ? _m : true,
+ speechmaticsEndpointingValue: (_o = config.speechmaticsEndpointingValue) !== null && _o !== void 0 ? _o : 500,
+ openaiEndpointing: (_p = config.openaiEndpointing) !== null && _p !== void 0 ? _p : true,
+ openaiEndpointingValue: (_q = config.openaiEndpointingValue) !== null && _q !== void 0 ? _q : 500,
+ listenDuringPrompt: (_r = config.sttListenDuringPrompt) !== null && _r !== void 0 ? _r : undefined,
  };
  // tts (synthesizer)
  voiceSettings.tts = {
@@ -395,7 +455,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  voiceSettings.stt.altLanguages = [];
  }
  }
- if (config.ttsVendor === "elevenlabs") {
+ if (config.ttsVendor === "elevenlabs" || config.ttsVendor === "openai") {
  voiceSettings.tts.ttsModel = config.ttsModel;
  }
  // userNoInput
@@ -417,7 +477,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  flowNoInputFail: config.flowNoInputFail
  };
  // Check if userNoInputTimeout has a value and userNoInputTimeoutEnable is null or undefined to cover generic nodes
- if (((_l = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _l === void 0 ? void 0 : _l.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
+ if (((_s = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _s === void 0 ? void 0 : _s.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
  voiceSettings.userNoInput.userNoInputTimeoutEnable = true;
  }
  voiceSettings.dtmf = {
@@ -425,7 +485,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  dtmfInterDigitTimeout: config.dtmfInterDigitTimeout,
  dtmfMaxDigits: config.dtmfMaxDigits,
  dtmfMinDigits: config.dtmfMinDigits,
- dtmfSubmitDigit: (_m = config.dtmfSubmitDigit) === null || _m === void 0 ? void 0 : _m.trim(),
+ dtmfSubmitDigit: (_t = config.dtmfSubmitDigit) === null || _t === void 0 ? void 0 : _t.trim(),
  };
  if (config === null || config === void 0 ? void 0 : config.dtmfEnable) {
  if (voiceSettings.dtmf.dtmfSubmitDigit &&
@@ -462,7 +522,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  }
  // atmosphere sounds
  if (config.atmosphereAction) {
- if ((_o = config.atmosphereUrl) === null || _o === void 0 ? void 0 : _o.length) {
+ if ((_u = config.atmosphereUrl) === null || _u === void 0 ? void 0 : _u.length) {
  if (!(0, helper_1.isValidUrl)(config.atmosphereUrl)) {
  throw new Error(`Audio file URL is invalid ${config.atmosphereUrl}`);
  }
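
The three new recognizer branches follow the existing Deepgram pattern: an endpointing flag derived from either the session params or the Set Session Config node gates the vendor-specific options. A worked example of the deepgramflux branch, using only the defaults visible above:

```js
// A node config of { sttVendor: "deepgramflux", deepgramfluxEndpointing: true }
// with no overriding session params yields:
recognizer.deepgramfluxOptions = {
    endpointing: true,
    endOfTurnThreshold: 0.7,  // default when deepgramfluxEndOfTurnThreshold is unset
    endOfTurnTimeoutMs: 5000, // default when deepgramfluxEndOfTurnTimeoutMs is unset
};
// With endpointing disabled the branch emits only { endpointing: false }. The
// speechmatics and openai branches instead omit the `endpointing` property entirely,
// so the feature-server falls back to its own environment defaults.
```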
package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js
@@ -5,7 +5,7 @@ exports.prepareTransferParams = exports.transfer = void 0;
  const helper_1 = require("../../../descriptors/voicegateway2/utils/helper");
  const helper_2 = require("../utils/helper");
  exports.transfer = {
- handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
+ handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
  try {
  switch (endpointType) {
  case "bandwidth":
@@ -24,14 +24,15 @@ exports.transfer = {
  return this.handleAudioCodesInput((0, exports.prepareTransferParams)(params), endpointType);
  case "voiceGateway2":
  default:
- return this.handleVGInput((0, exports.prepareTransferParams)(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia);
+ return this.handleVGInput((0, exports.prepareTransferParams)(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia);
  }
  }
  catch (error) {
  throw Error(error.message);
  }
  },
- handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttDeepgramModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
+ handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
+ var _a;
  const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, timeLimit, sttLabel } = transferParam;
  const payload = {
  _voiceGateway2: {
@@ -122,14 +123,35 @@ exports.transfer = {
  recognizer.model = googleModel;
  }
  if (recognizer.vendor === 'deepgram') {
- recognizer.model = sttDeepgramModel;
+ recognizer.model = sttModel;
  recognizer.deepgramOptions = {
- model: sttDeepgramModel,
+ model: sttModel,
  punctuate: !sttDisablePunctuation,
  endpointing: deepgramEndpointing ? deepgramEndpointingValue : false,
  smartFormatting: deepgramSmartFormatting !== null && deepgramSmartFormatting !== void 0 ? deepgramSmartFormatting : false
  };
  }
+ if (recognizer.vendor === 'deepgramflux') {
+ recognizer.deepgramfluxOptions = {
+ endpointing: deepgramfluxEndpointing || true,
+ endOfTurnThreshold: deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7,
+ endOfTurnTimeoutMs: deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000
+ };
+ }
+ if (recognizer.vendor === 'speechmatics') {
+ recognizer.speechmaticsOptions = {
+ transcription_config: {},
+ };
+ if (speechmaticsEndpointing) {
+ recognizer.speechmaticsOptions.endpointing = speechmaticsEndpointingValue !== null && speechmaticsEndpointingValue !== void 0 ? speechmaticsEndpointingValue : 500;
+ }
+ }
+ if (recognizer.vendor === 'openai') {
+ recognizer.openaiOptions = Object.assign({}, ((_a = recognizer.openaiOptions) !== null && _a !== void 0 ? _a : {}));
+ if (openaiEndpointing) {
+ recognizer.openaiOptions.endpointing = openaiEndpointingValue !== null && openaiEndpointingValue !== void 0 ? openaiEndpointingValue : 500;
+ }
+ }
  if (sttLabel) {
  recognizer.label = sttLabel;
  }
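
`handleInput` and `handleVGInput` grow from 13 to 21 positional parameters, so call sites must be updated in lockstep. A call with the new ordering - the parameter order is taken from the signature above, while all values (and the free variables recognitionChannel, mediaPath, anchorMedia) are illustrative:

```js
// transferType/transferTarget are real keys destructured inside handleVGInput;
// their values here are placeholders.
const params = { transferType: "refer", transferTarget: "tel:+491234567890" };

exports.transfer.handleInput(
    "voiceGateway2", params, false, recognitionChannel,
    "deepgramflux",       // sttVendor
    "en-US",              // sttLanguage
    undefined,            // googleModel
    "flux-general-en",    // sttModel (hypothetical model name; key renamed from sttDeepgramModel)
    false,                // sttDisablePunctuation
    undefined, undefined, // deepgramEndpointing, deepgramEndpointingValue
    undefined,            // deepgramSmartFormatting
    true, 0.7, 5000,      // deepgramfluxEndpointing, ...EndOfTurnThreshold, ...EndOfTurnTimeoutMs
    undefined, undefined, // speechmaticsEndpointing, speechmaticsEndpointingValue
    undefined, undefined, // openaiEndpointing, openaiEndpointingValue
    mediaPath, anchorMedia
);
```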
package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js
@@ -127,6 +127,64 @@ exports.voiceConfigFields = [
  value: "deepgram"
  }
  },
+ {
+ key: "deepgramfluxEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "deepgramflux"
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnThreshold",
+ type: "slider",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0.5,
+ max: 0.9,
+ step: 0.1
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnTimeoutMs",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
+ defaultValue: 5000,
+ params: {
+ min: 500,
+ max: 10000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
  {
  key: "enableAdvancedSTTConfig",
  type: "toggle",
@@ -264,7 +322,7 @@ exports.voiceConfigFields = [
  defaultValue: "",
  params: {
  languageKey: "config.sttLanguage",
- modelKey: "config.sttDeepgramModel",
+ modelKey: "config.sttModel",
  }
  },
  {
@@ -275,7 +333,7 @@ exports.voiceConfigFields = [
  defaultValue: "",
  },
  {
- key: "sttDeepgramModel",
+ key: "sttModel",
  type: "sttSelect",
  label: "_unused_",
  description: "_unused_",
@@ -305,6 +363,13 @@ exports.SESSION_SPEECH_PARAMETERS = (0, createNodeDescriptor_1.createNodeDescrip
  "deepgramEndpointing",
  "deepgramEndpointingValue",
  "deepgramSmartFormatting",
+ "deepgramfluxEndpointing",
+ "deepgramfluxEndOfTurnThreshold",
+ "deepgramfluxEndOfTurnTimeoutMs",
+ "speechmaticsEndpointing",
+ "speechmaticsEndpointingValue",
+ "openaiEndpointing",
+ "openaiEndpointingValue",
  "sttHints",
  "sttHintsDynamicHints",
  "googleModel",
package/build/shared/charts/descriptors/voicegateway2/nodes/play.js
@@ -73,6 +73,13 @@ exports.playNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "deepgramEndpointing",
  "deepgramEndpointingValue",
  "deepgramSmartFormatting",
+ "deepgramfluxEndpointing",
+ "deepgramfluxEndOfTurnThreshold",
+ "deepgramfluxEndOfTurnTimeoutMs",
+ "speechmaticsEndpointing",
+ "speechmaticsEndpointingValue",
+ "openaiEndpointing",
+ "openaiEndpointingValue",
  "sttDisablePunctuation",
  "sttVadEnabled",
  "sttVadMode",