@cognigy/rest-api-client 2026.1.0 → 2026.2.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/CHANGELOG.md +5 -0
  2. package/README.md +15 -0
  3. package/build/apigroups/InsightsAPIGroup_2_1.js +27 -0
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +134 -383
  5. package/build/apigroups/SimulationAPIGroup_2_0.js +24 -23
  6. package/build/apigroups/aiAgentsV2/agent.js +3 -0
  7. package/build/apigroups/aiAgentsV2/agentAPI.js +38 -0
  8. package/build/apigroups/aiAgentsV2/agentPersona.js +3 -0
  9. package/build/apigroups/aiAgentsV2/agentPersonaAPI.js +38 -0
  10. package/build/apigroups/aiAgentsV2/tool.js +3 -0
  11. package/build/apigroups/aiAgentsV2/toolAPI.js +35 -0
  12. package/build/apigroups/aiAgentsV2/toolDescriptor.js +3 -0
  13. package/build/apigroups/aiAgentsV2/toolDescriptorAPI.js +13 -0
  14. package/build/apigroups/index.js +3 -1
  15. package/build/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  16. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +15 -0
  17. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +13 -0
  18. package/build/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +54 -10
  19. package/build/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +52 -0
  20. package/build/shared/charts/descriptors/connectionNodes/speechProviders/index.js +8 -7
  21. package/build/shared/charts/descriptors/index.js +4 -0
  22. package/build/shared/charts/descriptors/message/question/question.js +249 -59
  23. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +17 -15
  24. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +6 -4
  25. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +57 -1
  26. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +7 -0
  27. package/build/shared/charts/descriptors/service/aiAgentV2.js +89 -0
  28. package/build/shared/charts/descriptors/service/index.js +5 -1
  29. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +15 -13
  30. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +57 -1
  31. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +67 -13
  32. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +25 -3
  33. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +65 -0
  34. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  35. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +137 -1
  36. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +135 -2
  37. package/build/shared/errors/ErrorCode.js +2 -1
  38. package/build/shared/errors/ErrorCollection.js +1 -0
  39. package/build/shared/helper/BaseContext.js +1 -1
  40. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  41. package/build/shared/interfaces/handover.js +1 -0
  42. package/build/shared/interfaces/handoverProviders.js +0 -1
  43. package/build/shared/interfaces/messageAPI/endpoints.js +3 -0
  44. package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
  45. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  46. package/build/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  47. package/build/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +3 -0
  48. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +3 -0
  49. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +3 -0
  50. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +3 -0
  51. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +3 -0
  52. package/build/shared/interfaces/security/ISessionScope.js +3 -0
  53. package/build/spec/aiAgentV2.spec.js +564 -0
  54. package/dist/esm/apigroups/InsightsAPIGroup_2_1.js +13 -0
  55. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +134 -383
  56. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +24 -23
  57. package/dist/esm/apigroups/aiAgentsV2/agent.js +2 -0
  58. package/dist/esm/apigroups/aiAgentsV2/agentAPI.js +24 -0
  59. package/dist/esm/apigroups/aiAgentsV2/agentPersona.js +2 -0
  60. package/dist/esm/apigroups/aiAgentsV2/agentPersonaAPI.js +24 -0
  61. package/dist/esm/apigroups/aiAgentsV2/aiAgentV2API.js +2 -0
  62. package/dist/esm/apigroups/aiAgentsV2/tool.js +2 -0
  63. package/dist/esm/apigroups/aiAgentsV2/toolAPI.js +21 -0
  64. package/dist/esm/apigroups/aiAgentsV2/toolDescriptor.js +2 -0
  65. package/dist/esm/apigroups/aiAgentsV2/toolDescriptorAPI.js +9 -0
  66. package/dist/esm/apigroups/index.js +1 -0
  67. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  68. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +12 -0
  69. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +10 -0
  70. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +54 -10
  71. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +49 -0
  72. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/index.js +3 -3
  73. package/dist/esm/shared/charts/descriptors/index.js +5 -1
  74. package/dist/esm/shared/charts/descriptors/message/question/question.js +249 -59
  75. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +17 -15
  76. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +6 -4
  77. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +56 -0
  78. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +7 -0
  79. package/dist/esm/shared/charts/descriptors/service/aiAgentV2.js +87 -0
  80. package/dist/esm/shared/charts/descriptors/service/index.js +2 -0
  81. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +22 -20
  82. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +56 -0
  83. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +67 -13
  84. package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +25 -3
  85. package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +65 -0
  86. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  87. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +137 -1
  88. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +135 -2
  89. package/dist/esm/shared/errors/ErrorCode.js +2 -1
  90. package/dist/esm/shared/errors/ErrorCollection.js +1 -0
  91. package/dist/esm/shared/helper/BaseContext.js +1 -1
  92. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  93. package/dist/esm/shared/interfaces/handover.js +1 -0
  94. package/dist/esm/shared/interfaces/handoverProviders.js +0 -1
  95. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +3 -0
  96. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +1 -0
  97. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  98. package/dist/esm/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  99. package/dist/esm/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +2 -0
  100. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +2 -0
  101. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +2 -0
  102. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +2 -0
  103. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +2 -0
  104. package/dist/esm/shared/interfaces/security/ISessionScope.js +2 -0
  105. package/dist/esm/spec/aiAgentV2.spec.js +563 -0
  106. package/package.json +6 -3
  107. package/types/index.d.ts +667 -30
package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js
@@ -178,10 +178,10 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  return synthesizer;
  }
  buildRecognizer(sessionParams, stt, vad, azureConfig) {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
  const { recognizer: sessionParamsRecognizer } = sessionParams || {};
- const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
- const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, sttModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [] } = stt || {};
+ const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, deepgramfluxOptions: spDeepgramfluxOptions, speechmaticsOptions: spSpeechmaticsOptions, openaiOptions: spOpenaiOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
+ const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, sttModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [], speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue } = stt || {};
  const recognizer = {};
  recognizer.language = spLanguage || sttLanguage || undefined;
  recognizer.hints = spHints || sttHints || undefined;
@@ -229,11 +229,57 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  }
  recognizer.deepgramOptions = deepgramOptions;
  }
+ if (recognizer.vendor === 'deepgramflux') {
+ const { endpointing: spEndpointing, } = spDeepgramfluxOptions || {};
+ /*
+ * session params: 'endpointing' is a boolean
+ * ssc node: 'deepgramfluxEndpointing' => boolean, 'deepgramfluxEndOfTurnThreshold' => number, 'deepgramfluxEndOfTurnTimeoutMs' => number
+ */
+ const isDeepgramfluxEndpointingEnabled = (_d = (typeof spEndpointing === "number" || deepgramfluxEndpointing)) !== null && _d !== void 0 ? _d : false;
+ const deepgramfluxOptions = {
+ endpointing: isDeepgramfluxEndpointingEnabled,
+ };
+ if (isDeepgramfluxEndpointingEnabled) {
+ deepgramfluxOptions.endOfTurnThreshold = deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7;
+ deepgramfluxOptions.endOfTurnTimeoutMs = deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000;
+ }
+ recognizer.deepgramfluxOptions = deepgramfluxOptions;
+ }
+ if (recognizer.vendor === 'speechmatics') {
+ const { endpointing: spEndpointing, } = spSpeechmaticsOptions || {};
+ /*
+ * session params: 'endpointing' is a number (milliseconds)
+ * ssc node: 'speechmaticsEndpointing' => boolean, 'speechmaticsEndpointingValue' => number
+ */
+ const isSpeechmaticsEndpointingEnabled = (_e = (typeof spEndpointing === "number" || speechmaticsEndpointing)) !== null && _e !== void 0 ? _e : false;
+ const speechmaticsOptions = {
+ transcription_config: {},
+ };
+ if (isSpeechmaticsEndpointingEnabled) {
+ speechmaticsOptions.endpointing = (_f = (spEndpointing || speechmaticsEndpointingValue)) !== null && _f !== void 0 ? _f : 500;
+ }
+ // When endpointing is disabled, simply don't include the property
+ // (the feature-server will use its default SPEECHMATICS_END_OF_UTTERANCE_SILENCE_DURATION_MS)
+ recognizer.speechmaticsOptions = speechmaticsOptions;
+ }
  if (recognizer.vendor === 'openai') {
  const openaiModel = spModel || sttModel;
  if (openaiModel) {
- recognizer.openaiOptions = Object.assign(Object.assign({}, ((_d = recognizer.openaiOptions) !== null && _d !== void 0 ? _d : {})), { model: openaiModel });
+ recognizer.openaiOptions = Object.assign(Object.assign({}, ((_g = recognizer.openaiOptions) !== null && _g !== void 0 ? _g : {})), { model: openaiModel });
+ }
+ const { endpointing: spEndpointing, } = spOpenaiOptions || {};
+ /*
+ * session params: 'endpointing' is a number (milliseconds)
+ * ssc node: 'openaiEndpointing' => boolean, 'openaiEndpointingValue' => number
+ */
+ const isOpenaiEndpointingEnabled = (_h = (typeof spEndpointing === "number" || openaiEndpointing)) !== null && _h !== void 0 ? _h : false;
+ const openaiOptions = Object.assign({}, ((_j = recognizer.openaiOptions) !== null && _j !== void 0 ? _j : {}));
+ if (isOpenaiEndpointingEnabled) {
+ openaiOptions.endpointing = (_k = (spEndpointing || openaiEndpointingValue)) !== null && _k !== void 0 ? _k : 500;
  }
+ // When endpointing is disabled, simply don't include the property
+ // (the feature-server will use its default OPENAI_TURN_DETECTION_SILENCE_DURATION_MS)
+ recognizer.openaiOptions = openaiOptions;
  }
  }
  if (this.has(spVad) || this.has(vad)) {
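
For orientation, a minimal sketch of the vendor-specific recognizer option shapes this mapper now emits. The interface names below are illustrative only; the package ships plain, untyped objects. One apparent quirk in the source worth noting: the Deepgram Flux comment describes the session-param 'endpointing' as a boolean, while the guard tests typeof spEndpointing === "number", so a session-level boolean appears not to enable the flag on its own.

// Illustrative shapes only (not exported by @cognigy/rest-api-client):
interface DeepgramFluxOptions {
    endpointing: boolean;          // always present
    endOfTurnThreshold?: number;   // set only while endpointing is enabled; default 0.7
    endOfTurnTimeoutMs?: number;   // set only while endpointing is enabled; default 5000
}
interface SpeechmaticsOptions {
    transcription_config: Record<string, unknown>; // always present, even when empty
    endpointing?: number;          // ms; omitted when disabled so the feature-server default applies
}
interface OpenaiOptions {
    model?: string;                // from session params or the node's sttModel
    endpointing?: number;          // ms; omitted when disabled so the feature-server default applies
}

// Example fragment the mapper could emit for Deepgram Flux with defaults:
const recognizer: { vendor: string; deepgramfluxOptions?: DeepgramFluxOptions } = {
    vendor: "deepgramflux",
    deepgramfluxOptions: { endpointing: true, endOfTurnThreshold: 0.7, endOfTurnTimeoutMs: 5000 },
};
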
@@ -301,7 +347,7 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
  }
  const mapper = new SessionConfigMapper("voiceGateway2");
  function voiceConfigParamsToVoiceSettings(config, api) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
  let voiceSettings = {};
  if (config.sttVendor === 'none') {
  delete config.sttVendor;
@@ -338,25 +384,33 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  hints = [...hints, ...config.sttHints];
  }
  const deepgramEndpointing = (_d = config.deepgramEndpointing) !== null && _d !== void 0 ? _d : true;
+ const deepgramfluxEndpointing = (_e = config.deepgramfluxEndpointing) !== null && _e !== void 0 ? _e : true;
  const deepgramShortUtterance = deepgramEndpointing ? false : true;
  // stt (recognizer)
  voiceSettings.stt = {
  sttVendor: config.sttVendor,
  sttLanguage: config.sttLanguage,
  sttHints: hints,
- sttLabel: (_e = config.sttLabel) !== null && _e !== void 0 ? _e : undefined,
+ sttLabel: (_f = config.sttLabel) !== null && _f !== void 0 ? _f : undefined,
  sttDisablePunctuation: config.sttDisablePunctuation !== undefined &&
  config.sttDisablePunctuation !== null
  ? !config.sttDisablePunctuation
  : undefined,
- googleModel: (_f = config.googleModel) !== null && _f !== void 0 ? _f : undefined,
+ googleModel: (_g = config.googleModel) !== null && _g !== void 0 ? _g : undefined,
  /* by default we enable endpointing - it is only undefined via SAP */
  deepgramEndpointing,
- deepgramEndpointingValue: (_g = config.deepgramEndpointingValue) !== null && _g !== void 0 ? _g : 250,
+ deepgramEndpointingValue: (_h = config.deepgramEndpointingValue) !== null && _h !== void 0 ? _h : 250,
  sttModel: config.sttModel || "",
- deepgramSmartFormatting: (_h = config.deepgramSmartFormatting) !== null && _h !== void 0 ? _h : undefined,
+ deepgramfluxEndpointing,
+ deepgramfluxEndOfTurnThreshold: (_j = config.deepgramfluxEndOfTurnThreshold) !== null && _j !== void 0 ? _j : 0.7,
+ deepgramfluxEndOfTurnTimeoutMs: (_k = config.deepgramfluxEndOfTurnTimeoutMs) !== null && _k !== void 0 ? _k : 5000,
+ deepgramSmartFormatting: (_l = config.deepgramSmartFormatting) !== null && _l !== void 0 ? _l : undefined,
  deepgramShortUtterance,
- listenDuringPrompt: (_j = config.sttListenDuringPrompt) !== null && _j !== void 0 ? _j : undefined,
+ speechmaticsEndpointing: (_m = config.speechmaticsEndpointing) !== null && _m !== void 0 ? _m : true,
+ speechmaticsEndpointingValue: (_o = config.speechmaticsEndpointingValue) !== null && _o !== void 0 ? _o : 500,
+ openaiEndpointing: (_p = config.openaiEndpointing) !== null && _p !== void 0 ? _p : true,
+ openaiEndpointingValue: (_q = config.openaiEndpointingValue) !== null && _q !== void 0 ? _q : 500,
+ listenDuringPrompt: (_r = config.sttListenDuringPrompt) !== null && _r !== void 0 ? _r : undefined,
  };
  // tts (synthesizer)
  voiceSettings.tts = {
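
The fallbacks above mean endpointing is now on by default for all four vendors. As a sketch, for a node config that sets none of these fields, the relevant slice of voiceSettings.stt would be (values taken from the fallbacks, object shape illustrative):

// Illustrative slice of voiceSettings.stt with an otherwise empty config:
const sttDefaults = {
    deepgramEndpointing: true,
    deepgramEndpointingValue: 250,
    deepgramfluxEndpointing: true,
    deepgramfluxEndOfTurnThreshold: 0.7,
    deepgramfluxEndOfTurnTimeoutMs: 5000,
    speechmaticsEndpointing: true,
    speechmaticsEndpointingValue: 500,
    openaiEndpointing: true,
    openaiEndpointingValue: 500,
};
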
@@ -423,7 +477,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  flowNoInputFail: config.flowNoInputFail
  };
  // Check if userNoInputTimeout has a value and userNoInputTimeoutEnable is null or undefined to cover generic nodes
- if (((_k = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _k === void 0 ? void 0 : _k.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
+ if (((_s = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _s === void 0 ? void 0 : _s.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
  voiceSettings.userNoInput.userNoInputTimeoutEnable = true;
  }
  voiceSettings.dtmf = {
@@ -431,7 +485,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  dtmfInterDigitTimeout: config.dtmfInterDigitTimeout,
  dtmfMaxDigits: config.dtmfMaxDigits,
  dtmfMinDigits: config.dtmfMinDigits,
- dtmfSubmitDigit: (_l = config.dtmfSubmitDigit) === null || _l === void 0 ? void 0 : _l.trim(),
+ dtmfSubmitDigit: (_t = config.dtmfSubmitDigit) === null || _t === void 0 ? void 0 : _t.trim(),
  };
  if (config === null || config === void 0 ? void 0 : config.dtmfEnable) {
  if (voiceSettings.dtmf.dtmfSubmitDigit &&
@@ -468,7 +522,7 @@ function voiceConfigParamsToVoiceSettings(config, api) {
  }
  // atmosphere sounds
  if (config.atmosphereAction) {
- if ((_m = config.atmosphereUrl) === null || _m === void 0 ? void 0 : _m.length) {
+ if ((_u = config.atmosphereUrl) === null || _u === void 0 ? void 0 : _u.length) {
  if (!(0, helper_1.isValidUrl)(config.atmosphereUrl)) {
  throw new Error(`Audio file URL is invalid ${config.atmosphereUrl}`);
  }
package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js
@@ -5,7 +5,7 @@ exports.prepareTransferParams = exports.transfer = void 0;
  const helper_1 = require("../../../descriptors/voicegateway2/utils/helper");
  const helper_2 = require("../utils/helper");
  exports.transfer = {
- handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
+ handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
  try {
  switch (endpointType) {
  case "bandwidth":
@@ -24,14 +24,15 @@ exports.transfer = {
  return this.handleAudioCodesInput((0, exports.prepareTransferParams)(params), endpointType);
  case "voiceGateway2":
  default:
- return this.handleVGInput((0, exports.prepareTransferParams)(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia);
+ return this.handleVGInput((0, exports.prepareTransferParams)(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia);
  }
  }
  catch (error) {
  throw Error(error.message);
  }
  },
- handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
+ handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
+ var _a;
  const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, timeLimit, sttLabel } = transferParam;
  const payload = {
  _voiceGateway2: {
@@ -130,6 +131,27 @@ exports.transfer = {
  smartFormatting: deepgramSmartFormatting !== null && deepgramSmartFormatting !== void 0 ? deepgramSmartFormatting : false
  };
  }
+ if (recognizer.vendor === 'deepgramflux') {
+ recognizer.deepgramfluxOptions = {
+ endpointing: deepgramfluxEndpointing || true,
+ endOfTurnThreshold: deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7,
+ endOfTurnTimeoutMs: deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000
+ };
+ }
+ if (recognizer.vendor === 'speechmatics') {
+ recognizer.speechmaticsOptions = {
+ transcription_config: {},
+ };
+ if (speechmaticsEndpointing) {
+ recognizer.speechmaticsOptions.endpointing = speechmaticsEndpointingValue !== null && speechmaticsEndpointingValue !== void 0 ? speechmaticsEndpointingValue : 500;
+ }
+ }
+ if (recognizer.vendor === 'openai') {
+ recognizer.openaiOptions = Object.assign({}, ((_a = recognizer.openaiOptions) !== null && _a !== void 0 ? _a : {}));
+ if (openaiEndpointing) {
+ recognizer.openaiOptions.endpointing = openaiEndpointingValue !== null && openaiEndpointingValue !== void 0 ? openaiEndpointingValue : 500;
+ }
+ }
  if (sttLabel) {
  recognizer.label = sttLabel;
  }
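
Because the new vendor settings travel through handleInput and handleVGInput as positional arguments, call sites must match the widened ordering exactly. A hedged sketch of a voiceGateway2 call, with all argument values illustrative and 'transfer'/'transferParams' assumed to be in scope:

// Sketch only; 'transferParams' stands for the object built by prepareTransferParams.
const payload = transfer.handleInput(
    "voiceGateway2", transferParams, false,
    undefined,                        // recognitionChannel
    "deepgramflux",                   // sttVendor
    "en-US",                          // sttLanguage
    undefined, undefined,             // googleModel, sttModel
    undefined,                        // sttDisablePunctuation
    undefined, undefined, undefined,  // deepgramEndpointing, -Value, -SmartFormatting
    true, 0.7, 5000,                  // deepgramfluxEndpointing, -EndOfTurnThreshold, -EndOfTurnTimeoutMs
    true, 500,                        // speechmaticsEndpointing, -Value
    true, 500,                        // openaiEndpointing, -Value
    undefined, undefined              // mediaPath, anchorMedia
);

Note that in this mapper the expression deepgramfluxEndpointing || true always evaluates to true, so Flux endpointing appears to be enabled unconditionally on transfer, unlike the set-session-config path above.
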
package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js
@@ -127,6 +127,64 @@ exports.voiceConfigFields = [
  value: "deepgram"
  }
  },
+ {
+ key: "deepgramfluxEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "deepgramflux"
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnThreshold",
+ type: "slider",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0.5,
+ max: 0.9,
+ step: 0.1
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnTimeoutMs",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
+ defaultValue: 5000,
+ params: {
+ min: 500,
+ max: 10000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
  {
  key: "enableAdvancedSTTConfig",
  type: "toggle",
@@ -305,6 +363,13 @@ exports.SESSION_SPEECH_PARAMETERS = (0, createNodeDescriptor_1.createNodeDescriptor)(
  "deepgramEndpointing",
  "deepgramEndpointingValue",
  "deepgramSmartFormatting",
+ "deepgramfluxEndpointing",
+ "deepgramfluxEndOfTurnThreshold",
+ "deepgramfluxEndOfTurnTimeoutMs",
+ "speechmaticsEndpointing",
+ "speechmaticsEndpointingValue",
+ "openaiEndpointing",
+ "openaiEndpointingValue",
  "sttHints",
  "sttHintsDynamicHints",
  "googleModel",
package/build/shared/charts/descriptors/voicegateway2/nodes/play.js
@@ -73,6 +73,13 @@ exports.playNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "deepgramEndpointing",
  "deepgramEndpointingValue",
  "deepgramSmartFormatting",
+ "deepgramfluxEndpointing",
+ "deepgramfluxEndOfTurnThreshold",
+ "deepgramfluxEndOfTurnTimeoutMs",
+ "speechmaticsEndpointing",
+ "speechmaticsEndpointingValue",
+ "openaiEndpointing",
+ "openaiEndpointingValue",
  "sttDisablePunctuation",
  "sttVadEnabled",
  "sttVadMode",
package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js
@@ -81,6 +81,132 @@ exports.voiceConfigFields = [
  value: "deepgram"
  }
  },
+ {
+ key: "deepgramfluxEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "deepgramflux"
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnThreshold",
+ type: "slider",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0.5,
+ max: 0.9,
+ step: 0.1
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnTimeoutMs",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
+ defaultValue: 5000,
+ params: {
+ min: 500,
+ max: 10000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "speechmaticsEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "speechmatics"
+ }
+ },
+ {
+ key: "speechmaticsEndpointingValue",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__DESCRIPTION",
+ defaultValue: 500,
+ params: {
+ min: 10,
+ max: 5000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "speechmatics"
+ },
+ {
+ key: "speechmaticsEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "openaiEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "openai"
+ }
+ },
+ {
+ key: "openaiEndpointingValue",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__DESCRIPTION",
+ defaultValue: 500,
+ params: {
+ min: 10,
+ max: 1000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "openai"
+ },
+ {
+ key: "openaiEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
  {
  key: "sttHints",
  type: "textArray",
@@ -109,6 +235,10 @@ exports.voiceConfigFields = [
  key: "sttVendor",
  value: "deepgram"
  },
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
  ]
  }
  },
@@ -982,6 +1112,13 @@ exports.setSessionConfigNode = (0, createNodeDescriptor_1.createNodeDescriptor)(
  "deepgramEndpointing",
  "deepgramEndpointingValue",
  "deepgramSmartFormatting",
+ "deepgramfluxEndpointing",
+ "deepgramfluxEndOfTurnThreshold",
+ "deepgramfluxEndOfTurnTimeoutMs",
+ "speechmaticsEndpointing",
+ "speechmaticsEndpointingValue",
+ "openaiEndpointing",
+ "openaiEndpointingValue",
  "sttHints",
  "sttHintsDynamicHints",
  "googleModel",
@@ -1104,7 +1241,6 @@ exports.setSessionConfigNode = (0, createNodeDescriptor_1.createNodeDescriptor)(
  try {
  const voiceSettings = (0, setSessionConfig_mapper_2.voiceConfigParamsToVoiceSettings)(config, api);
  const payload = setSessionConfig_mapper_1.setSessionConfig.handleVGInput(voiceSettings, sessionParams, api);
- api.log("error", JSON.stringify(payload));
  await api.say(null, {
  _cognigy: payload,
  });
package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js
@@ -404,6 +404,132 @@ exports.transferNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  value: "deepgram"
  }
  },
+ {
+ key: "deepgramfluxEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
+ defaultValue: false,
+ condition: {
+ key: "sttVendor",
+ value: "deepgramflux"
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnThreshold",
+ type: "slider",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0.5,
+ max: 0.9,
+ step: 0.1
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "deepgramfluxEndOfTurnTimeoutMs",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
+ defaultValue: 5000,
+ params: {
+ min: 500,
+ max: 10000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "deepgramflux"
+ },
+ {
+ key: "deepgramfluxEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "speechmaticsEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "speechmatics"
+ }
+ },
+ {
+ key: "speechmaticsEndpointingValue",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__DESCRIPTION",
+ defaultValue: 500,
+ params: {
+ min: 10,
+ max: 5000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "speechmatics"
+ },
+ {
+ key: "speechmaticsEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
+ {
+ key: "openaiEndpointing",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__DESCRIPTION",
+ defaultValue: true,
+ condition: {
+ key: "sttVendor",
+ value: "openai"
+ }
+ },
+ {
+ key: "openaiEndpointingValue",
+ type: "number",
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__LABEL",
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__DESCRIPTION",
+ defaultValue: 500,
+ params: {
+ min: 10,
+ max: 1000
+ },
+ condition: {
+ and: [
+ {
+ key: "sttVendor",
+ value: "openai"
+ },
+ {
+ key: "openaiEndpointing",
+ value: true
+ },
+ ]
+ }
+ },
  {
  key: "googleModel",
  type: "select",
@@ -464,6 +590,13 @@ exports.transferNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "deepgramEndpointing",
  "deepgramEndpointingValue",
  "deepgramSmartFormatting",
+ "deepgramfluxEndpointing",
+ "deepgramfluxEndOfTurnThreshold",
+ "deepgramfluxEndOfTurnTimeoutMs",
+ "speechmaticsEndpointing",
+ "speechmaticsEndpointingValue",
+ "openaiEndpointing",
+ "openaiEndpointingValue",
  "googleModel",
  "dialTranscriptionWebhook",
  "recognitionChannel"
@@ -553,7 +686,7 @@ exports.transferNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  summary: "UI__NODE_EDITOR__VOICEGATEWAY2__TRANSFER__SUMMARY",
  function: async ({ cognigy, config, }) => {
  const { api, input } = cognigy;
- const { transferType, transferTarget, referredBy, mediaPath, useTransferSipHeaders, transferSipHeaders = {}, transferReason, dialMusic, dialTranscriptionWebhook, dialCallerId, recognitionChannel, sttVendor, sttLanguage, sttDisablePunctuation, dialTimeout, enableTimeLimit, timeLimit, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, sttLabel, googleModel, sttModel, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, agentAssistEnabled, agentAssistHeadersKey = customHeaderDefaultValue, anchorMedia } = config;
+ const { transferType, transferTarget, referredBy, mediaPath, useTransferSipHeaders, transferSipHeaders = {}, transferReason, dialMusic, dialTranscriptionWebhook, dialCallerId, recognitionChannel, sttVendor, sttLanguage, sttDisablePunctuation, dialTimeout, enableTimeLimit, timeLimit, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, sttLabel, googleModel, sttModel, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, agentAssistEnabled, agentAssistHeadersKey = customHeaderDefaultValue, anchorMedia } = config;
  const transferParams = {
  transferType,
  transferReason,
@@ -627,7 +760,7 @@ exports.transferNode = (0, createNodeDescriptor_1.createNodeDescriptor)({
  transferParams.useTransferSipHeaders = false;
  api.log("error", "Invalid JSON in Transfer SIP Headers");
  }
- const payload = transfer_mapper_1.transfer.handleInput("voiceGateway2", transferParams, false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, media, anchorMedia);
+ const payload = transfer_mapper_1.transfer.handleInput("voiceGateway2", transferParams, false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, media, anchorMedia);
  await api.say(null, {
  _cognigy: payload,
  });
package/build/shared/errors/ErrorCode.js
@@ -6,9 +6,10 @@ var ErrorCode;
  ErrorCode[ErrorCode["BAD_REQUEST"] = 400] = "BAD_REQUEST";
  ErrorCode[ErrorCode["UNAUTHORIZED_ERROR"] = 401] = "UNAUTHORIZED_ERROR";
  ErrorCode[ErrorCode["PAYMENT_REQUIRED_ERROR"] = 402] = "PAYMENT_REQUIRED_ERROR";
+ ErrorCode[ErrorCode["FORBIDDEN_ERROR"] = 403] = "FORBIDDEN_ERROR";
+ ErrorCode[ErrorCode["NOT_FOUND"] = 404] = "NOT_FOUND";
  ErrorCode[ErrorCode["PAYLOAD_TOO_LARGE_ERROR"] = 413] = "PAYLOAD_TOO_LARGE_ERROR";
  ErrorCode[ErrorCode["TOO_MANY_REQUESTS_ERROR"] = 429] = "TOO_MANY_REQUESTS_ERROR";
- ErrorCode[ErrorCode["FORBIDDEN_ERROR"] = 403] = "FORBIDDEN_ERROR";
  ErrorCode[ErrorCode["BAD_GATEWAY"] = 502] = "BAD_GATEWAY";
  ErrorCode[ErrorCode["SERVICE_UNAVAILABLE_ERROR"] = 503] = "SERVICE_UNAVAILABLE_ERROR";
  ErrorCode[ErrorCode["GATEWAY_TIMEOUT_ERROR"] = 504] = "GATEWAY_TIMEOUT_ERROR";
package/build/shared/errors/ErrorCollection.js
@@ -51,6 +51,7 @@ exports.ErrorCollection = {
  [ErrorCode_1.ErrorCode.METHOD_NOT_ALLOWED_ERROR]: MethodNotAllowedError_1.MethodNotAllowedError,
  [ErrorCode_1.ErrorCode.MISSING_ARGUMENT_ERROR]: missingArgument_1.MissingArgumentError,
  [ErrorCode_1.ErrorCode.NETWORK_ERROR]: NetworkError_1.NetworkError,
+ [ErrorCode_1.ErrorCode.NOT_FOUND]: resourceNotFound_1.ResourceNotFoundError,
  [ErrorCode_1.ErrorCode.NOT_IMPLEMENTED_ERROR]: notImplementedError_1.NotImplementedError,
  [ErrorCode_1.ErrorCode.PAYLOAD_TOO_LARGE_ERROR]: PayloadTooLargeError_1.PayloadTooLargeError,
  [ErrorCode_1.ErrorCode.PROCESS_ERROR]: process_1.ProcessError,
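
With NOT_FOUND added to both the enum and this collection, a 404 status can be resolved to ResourceNotFoundError by key lookup. A minimal sketch, assuming ErrorCollection is importable from the package root (an assumption; the helper and constructor signature below are hypothetical):

// Hypothetical helper; ErrorCollection is the map shown in the hunk above.
import { ErrorCollection } from "@cognigy/rest-api-client";

function toClientError(status: number, message: string): Error {
    // Look the constructor up by HTTP status; fall back to a plain Error.
    const ErrorClass = (ErrorCollection as Record<number, new (msg: string) => Error>)[status];
    return ErrorClass ? new ErrorClass(message) : new Error(message);
}

// With the NOT_FOUND entry above, toClientError(404, "agent not found")
// would resolve to ResourceNotFoundError instead of a generic Error.
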
package/build/shared/helper/BaseContext.js
@@ -51,7 +51,7 @@ class BaseContext {
  const validObjectSize = (0, objectSizeValidator_1.validateObjectSize)(merged, (process.env.MAX_MEMORY_OBJECT_SIZE && parseInt(process.env.MAX_MEMORY_OBJECT_SIZE)) || 64000);
  if (validObjectSize.valid === false) {
  const errorMessage = `Cannot add value to context for key ${prop.toString()}: exceeded maximum context size.`;
- logger_1.logger.log('error', { traceId: `proxy-${this.traceId}` }, errorMessage, {
+ logger_1.logger.log('error', { traceId: this.traceId }, errorMessage, {
  organisationId: this.organisationId,
  projectId: this.projectId
  });