@cognigy/rest-api-client 2026.1.0 → 2026.2.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/CHANGELOG.md +5 -0
  2. package/README.md +15 -0
  3. package/build/apigroups/InsightsAPIGroup_2_1.js +27 -0
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +134 -383
  5. package/build/apigroups/SimulationAPIGroup_2_0.js +24 -23
  6. package/build/apigroups/aiAgentsV2/agent.js +3 -0
  7. package/build/apigroups/aiAgentsV2/agentAPI.js +38 -0
  8. package/build/apigroups/aiAgentsV2/agentPersona.js +3 -0
  9. package/build/apigroups/aiAgentsV2/agentPersonaAPI.js +38 -0
  10. package/build/apigroups/aiAgentsV2/tool.js +3 -0
  11. package/build/apigroups/aiAgentsV2/toolAPI.js +35 -0
  12. package/build/apigroups/aiAgentsV2/toolDescriptor.js +3 -0
  13. package/build/apigroups/aiAgentsV2/toolDescriptorAPI.js +13 -0
  14. package/build/apigroups/index.js +3 -1
  15. package/build/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  16. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +15 -0
  17. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +13 -0
  18. package/build/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +54 -10
  19. package/build/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +52 -0
  20. package/build/shared/charts/descriptors/connectionNodes/speechProviders/index.js +8 -7
  21. package/build/shared/charts/descriptors/index.js +4 -0
  22. package/build/shared/charts/descriptors/message/question/question.js +249 -59
  23. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +17 -15
  24. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +6 -4
  25. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +57 -1
  26. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +7 -0
  27. package/build/shared/charts/descriptors/service/aiAgentV2.js +89 -0
  28. package/build/shared/charts/descriptors/service/index.js +5 -1
  29. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +15 -13
  30. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +57 -1
  31. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +67 -13
  32. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +25 -3
  33. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +65 -0
  34. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  35. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +137 -1
  36. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +135 -2
  37. package/build/shared/errors/ErrorCode.js +2 -1
  38. package/build/shared/errors/ErrorCollection.js +1 -0
  39. package/build/shared/helper/BaseContext.js +1 -1
  40. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  41. package/build/shared/interfaces/handover.js +1 -0
  42. package/build/shared/interfaces/handoverProviders.js +0 -1
  43. package/build/shared/interfaces/messageAPI/endpoints.js +3 -0
  44. package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
  45. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  46. package/build/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  47. package/build/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +3 -0
  48. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +3 -0
  49. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +3 -0
  50. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +3 -0
  51. package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +3 -0
  52. package/build/shared/interfaces/security/ISessionScope.js +3 -0
  53. package/build/spec/aiAgentV2.spec.js +564 -0
  54. package/dist/esm/apigroups/InsightsAPIGroup_2_1.js +13 -0
  55. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +134 -383
  56. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +24 -23
  57. package/dist/esm/apigroups/aiAgentsV2/agent.js +2 -0
  58. package/dist/esm/apigroups/aiAgentsV2/agentAPI.js +24 -0
  59. package/dist/esm/apigroups/aiAgentsV2/agentPersona.js +2 -0
  60. package/dist/esm/apigroups/aiAgentsV2/agentPersonaAPI.js +24 -0
  61. package/dist/esm/apigroups/aiAgentsV2/aiAgentV2API.js +2 -0
  62. package/dist/esm/apigroups/aiAgentsV2/tool.js +2 -0
  63. package/dist/esm/apigroups/aiAgentsV2/toolAPI.js +21 -0
  64. package/dist/esm/apigroups/aiAgentsV2/toolDescriptor.js +2 -0
  65. package/dist/esm/apigroups/aiAgentsV2/toolDescriptorAPI.js +9 -0
  66. package/dist/esm/apigroups/index.js +1 -0
  67. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
  68. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +12 -0
  69. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +10 -0
  70. package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +54 -10
  71. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +49 -0
  72. package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/index.js +3 -3
  73. package/dist/esm/shared/charts/descriptors/index.js +5 -1
  74. package/dist/esm/shared/charts/descriptors/message/question/question.js +249 -59
  75. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +17 -15
  76. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +6 -4
  77. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +56 -0
  78. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +7 -0
  79. package/dist/esm/shared/charts/descriptors/service/aiAgentV2.js +87 -0
  80. package/dist/esm/shared/charts/descriptors/service/index.js +2 -0
  81. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +22 -20
  82. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +56 -0
  83. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +67 -13
  84. package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +25 -3
  85. package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +65 -0
  86. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
  87. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +137 -1
  88. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +135 -2
  89. package/dist/esm/shared/errors/ErrorCode.js +2 -1
  90. package/dist/esm/shared/errors/ErrorCollection.js +1 -0
  91. package/dist/esm/shared/helper/BaseContext.js +1 -1
  92. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  93. package/dist/esm/shared/interfaces/handover.js +1 -0
  94. package/dist/esm/shared/interfaces/handoverProviders.js +0 -1
  95. package/dist/esm/shared/interfaces/messageAPI/endpoints.js +3 -0
  96. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +1 -0
  97. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  98. package/dist/esm/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
  99. package/dist/esm/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +2 -0
  100. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +2 -0
  101. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +2 -0
  102. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +2 -0
  103. package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +2 -0
  104. package/dist/esm/shared/interfaces/security/ISessionScope.js +2 -0
  105. package/dist/esm/spec/aiAgentV2.spec.js +563 -0
  106. package/package.json +6 -3
  107. package/types/index.d.ts +667 -30
@@ -175,10 +175,10 @@ class SessionConfigMapper extends BaseMapper {
175
175
  return synthesizer;
176
176
  }
177
177
  buildRecognizer(sessionParams, stt, vad, azureConfig) {
178
- var _a, _b, _c, _d;
178
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
179
179
  const { recognizer: sessionParamsRecognizer } = sessionParams || {};
180
- const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
181
- const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, sttModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [] } = stt || {};
180
+ const { vendor: spVendor, language: spLanguage, hints: spHints, label: spLabel, model: spModel, azureSttEndpointId: spAzureSttEndpointId, audioLogging: spAudioLogging, hintsBoost: spHintsBoost, punctuation: spPunctuation, altLanguages: spAltLanguages = [], deepgramOptions: spDeepgramOptions, deepgramfluxOptions: spDeepgramfluxOptions, speechmaticsOptions: spSpeechmaticsOptions, openaiOptions: spOpenaiOptions, vad: spVad, profanityOption: spProfanityOption } = sessionParamsRecognizer || {};
181
+ const { sttVendor, sttLanguage, sttHints, sttLabel, sttHintsBoost, sttDisablePunctuation, googleModel, deepgramEndpointing, deepgramEndpointingValue, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, sttModel, deepgramSmartFormatting, deepgramShortUtterance, altLanguages = [], speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue } = stt || {};
182
182
  const recognizer = {};
183
183
  recognizer.language = spLanguage || sttLanguage || undefined;
184
184
  recognizer.hints = spHints || sttHints || undefined;
@@ -226,11 +226,57 @@ class SessionConfigMapper extends BaseMapper {
226
226
  }
227
227
  recognizer.deepgramOptions = deepgramOptions;
228
228
  }
229
+ if (recognizer.vendor === 'deepgramflux') {
230
+ const { endpointing: spEndpointing, } = spDeepgramfluxOptions || {};
231
+ /*
232
+ * session params: 'endpointing' is a boolean
233
+ * ssc node: 'deepgramfluxEndpointing' => boolean, 'deepgramfluxEndOfTurnThreshold' => number, 'deepgramfluxEndOfTurnTimeoutMs' => number
234
+ */
235
+ const isDeepgramfluxEndpointingEnabled = (_d = (typeof spEndpointing === "number" || deepgramfluxEndpointing)) !== null && _d !== void 0 ? _d : false;
236
+ const deepgramfluxOptions = {
237
+ endpointing: isDeepgramfluxEndpointingEnabled,
238
+ };
239
+ if (isDeepgramfluxEndpointingEnabled) {
240
+ deepgramfluxOptions.endOfTurnThreshold = deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7;
241
+ deepgramfluxOptions.endOfTurnTimeoutMs = deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000;
242
+ }
243
+ recognizer.deepgramfluxOptions = deepgramfluxOptions;
244
+ }
245
+ if (recognizer.vendor === 'speechmatics') {
246
+ const { endpointing: spEndpointing, } = spSpeechmaticsOptions || {};
247
+ /*
248
+ * session params: 'endpointing' is a number (milliseconds)
249
+ * ssc node: 'speechmaticsEndpointing' => boolean, 'speechmaticsEndpointingValue' => number
250
+ */
251
+ const isSpeechmaticsEndpointingEnabled = (_e = (typeof spEndpointing === "number" || speechmaticsEndpointing)) !== null && _e !== void 0 ? _e : false;
252
+ const speechmaticsOptions = {
253
+ transcription_config: {},
254
+ };
255
+ if (isSpeechmaticsEndpointingEnabled) {
256
+ speechmaticsOptions.endpointing = (_f = (spEndpointing || speechmaticsEndpointingValue)) !== null && _f !== void 0 ? _f : 500;
257
+ }
258
+ // When endpointing is disabled, simply don't include the property
259
+ // (the feature-server will use its default SPEECHMATICS_END_OF_UTTERANCE_SILENCE_DURATION_MS)
260
+ recognizer.speechmaticsOptions = speechmaticsOptions;
261
+ }
229
262
  if (recognizer.vendor === 'openai') {
230
263
  const openaiModel = spModel || sttModel;
231
264
  if (openaiModel) {
232
- recognizer.openaiOptions = Object.assign(Object.assign({}, ((_d = recognizer.openaiOptions) !== null && _d !== void 0 ? _d : {})), { model: openaiModel });
265
+ recognizer.openaiOptions = Object.assign(Object.assign({}, ((_g = recognizer.openaiOptions) !== null && _g !== void 0 ? _g : {})), { model: openaiModel });
266
+ }
267
+ const { endpointing: spEndpointing, } = spOpenaiOptions || {};
268
+ /*
269
+ * session params: 'endpointing' is a number (milliseconds)
270
+ * ssc node: 'openaiEndpointing' => boolean, 'openaiEndpointingValue' => number
271
+ */
272
+ const isOpenaiEndpointingEnabled = (_h = (typeof spEndpointing === "number" || openaiEndpointing)) !== null && _h !== void 0 ? _h : false;
273
+ const openaiOptions = Object.assign({}, ((_j = recognizer.openaiOptions) !== null && _j !== void 0 ? _j : {}));
274
+ if (isOpenaiEndpointingEnabled) {
275
+ openaiOptions.endpointing = (_k = (spEndpointing || openaiEndpointingValue)) !== null && _k !== void 0 ? _k : 500;
233
276
  }
277
+ // When endpointing is disabled, simply don't include the property
278
+ // (the feature-server will use its default OPENAI_TURN_DETECTION_SILENCE_DURATION_MS)
279
+ recognizer.openaiOptions = openaiOptions;
234
280
  }
235
281
  }
236
282
  if (this.has(spVad) || this.has(vad)) {
@@ -298,7 +344,7 @@ class SessionConfigMapper extends BaseMapper {
298
344
  }
299
345
  const mapper = new SessionConfigMapper("voiceGateway2");
300
346
  export function voiceConfigParamsToVoiceSettings(config, api) {
301
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m;
347
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
302
348
  let voiceSettings = {};
303
349
  if (config.sttVendor === 'none') {
304
350
  delete config.sttVendor;
@@ -335,25 +381,33 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
335
381
  hints = [...hints, ...config.sttHints];
336
382
  }
337
383
  const deepgramEndpointing = (_d = config.deepgramEndpointing) !== null && _d !== void 0 ? _d : true;
384
+ const deepgramfluxEndpointing = (_e = config.deepgramfluxEndpointing) !== null && _e !== void 0 ? _e : true;
338
385
  const deepgramShortUtterance = deepgramEndpointing ? false : true;
339
386
  // stt (recognizer)
340
387
  voiceSettings.stt = {
341
388
  sttVendor: config.sttVendor,
342
389
  sttLanguage: config.sttLanguage,
343
390
  sttHints: hints,
344
- sttLabel: (_e = config.sttLabel) !== null && _e !== void 0 ? _e : undefined,
391
+ sttLabel: (_f = config.sttLabel) !== null && _f !== void 0 ? _f : undefined,
345
392
  sttDisablePunctuation: config.sttDisablePunctuation !== undefined &&
346
393
  config.sttDisablePunctuation !== null
347
394
  ? !config.sttDisablePunctuation
348
395
  : undefined,
349
- googleModel: (_f = config.googleModel) !== null && _f !== void 0 ? _f : undefined,
396
+ googleModel: (_g = config.googleModel) !== null && _g !== void 0 ? _g : undefined,
350
397
  /* by default we enable endpointing - it is only undefined via SAP */
351
398
  deepgramEndpointing,
352
- deepgramEndpointingValue: (_g = config.deepgramEndpointingValue) !== null && _g !== void 0 ? _g : 250,
399
+ deepgramEndpointingValue: (_h = config.deepgramEndpointingValue) !== null && _h !== void 0 ? _h : 250,
353
400
  sttModel: config.sttModel || "",
354
- deepgramSmartFormatting: (_h = config.deepgramSmartFormatting) !== null && _h !== void 0 ? _h : undefined,
401
+ deepgramfluxEndpointing,
402
+ deepgramfluxEndOfTurnThreshold: (_j = config.deepgramfluxEndOfTurnThreshold) !== null && _j !== void 0 ? _j : 0.7,
403
+ deepgramfluxEndOfTurnTimeoutMs: (_k = config.deepgramfluxEndOfTurnTimeoutMs) !== null && _k !== void 0 ? _k : 5000,
404
+ deepgramSmartFormatting: (_l = config.deepgramSmartFormatting) !== null && _l !== void 0 ? _l : undefined,
355
405
  deepgramShortUtterance,
356
- listenDuringPrompt: (_j = config.sttListenDuringPrompt) !== null && _j !== void 0 ? _j : undefined,
406
+ speechmaticsEndpointing: (_m = config.speechmaticsEndpointing) !== null && _m !== void 0 ? _m : true,
407
+ speechmaticsEndpointingValue: (_o = config.speechmaticsEndpointingValue) !== null && _o !== void 0 ? _o : 500,
408
+ openaiEndpointing: (_p = config.openaiEndpointing) !== null && _p !== void 0 ? _p : true,
409
+ openaiEndpointingValue: (_q = config.openaiEndpointingValue) !== null && _q !== void 0 ? _q : 500,
410
+ listenDuringPrompt: (_r = config.sttListenDuringPrompt) !== null && _r !== void 0 ? _r : undefined,
357
411
  };
358
412
  // tts (synthesizer)
359
413
  voiceSettings.tts = {
@@ -420,7 +474,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
420
474
  flowNoInputFail: config.flowNoInputFail
421
475
  };
422
476
  // Check if userNoInputTimeout has a value and userNoInputTimeoutEnable is null or undefined to cover generic nodes
423
- if (((_k = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _k === void 0 ? void 0 : _k.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
477
+ if (((_s = voiceSettings === null || voiceSettings === void 0 ? void 0 : voiceSettings.userNoInput) === null || _s === void 0 ? void 0 : _s.userNoInputTimeout) && (voiceSettings.userNoInput.userNoInputTimeoutEnable === null || voiceSettings.userNoInput.userNoInputTimeoutEnable === undefined)) {
424
478
  voiceSettings.userNoInput.userNoInputTimeoutEnable = true;
425
479
  }
426
480
  voiceSettings.dtmf = {
@@ -428,7 +482,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
428
482
  dtmfInterDigitTimeout: config.dtmfInterDigitTimeout,
429
483
  dtmfMaxDigits: config.dtmfMaxDigits,
430
484
  dtmfMinDigits: config.dtmfMinDigits,
431
- dtmfSubmitDigit: (_l = config.dtmfSubmitDigit) === null || _l === void 0 ? void 0 : _l.trim(),
485
+ dtmfSubmitDigit: (_t = config.dtmfSubmitDigit) === null || _t === void 0 ? void 0 : _t.trim(),
432
486
  };
433
487
  if (config === null || config === void 0 ? void 0 : config.dtmfEnable) {
434
488
  if (voiceSettings.dtmf.dtmfSubmitDigit &&
@@ -465,7 +519,7 @@ export function voiceConfigParamsToVoiceSettings(config, api) {
465
519
  }
466
520
  // atmosphere sounds
467
521
  if (config.atmosphereAction) {
468
- if ((_m = config.atmosphereUrl) === null || _m === void 0 ? void 0 : _m.length) {
522
+ if ((_u = config.atmosphereUrl) === null || _u === void 0 ? void 0 : _u.length) {
469
523
  if (!isValidUrl(config.atmosphereUrl)) {
470
524
  throw new Error(`Audio file URL is invalid ${config.atmosphereUrl}`);
471
525
  }
@@ -2,7 +2,7 @@
2
2
  import { cleanTarget } from "../../../descriptors/voicegateway2/utils/helper";
3
3
  import { isValidUrl, isValidPhoneNumber } from "../utils/helper";
4
4
  export const transfer = {
5
- handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
5
+ handleInput(endpointType, params, isGenericNode = false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
6
6
  try {
7
7
  switch (endpointType) {
8
8
  case "bandwidth":
@@ -21,14 +21,15 @@ export const transfer = {
21
21
  return this.handleAudioCodesInput(prepareTransferParams(params), endpointType);
22
22
  case "voiceGateway2":
23
23
  default:
24
- return this.handleVGInput(prepareTransferParams(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia);
24
+ return this.handleVGInput(prepareTransferParams(params), recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia);
25
25
  }
26
26
  }
27
27
  catch (error) {
28
28
  throw Error(error.message);
29
29
  }
30
30
  },
31
- handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, mediaPath, anchorMedia) {
31
+ handleVGInput(transferParam, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, mediaPath, anchorMedia) {
32
+ var _a;
32
33
  const { transferType, transferTarget, transferReason, referredBy, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, dialTimeout, timeLimit, sttLabel } = transferParam;
33
34
  const payload = {
34
35
  _voiceGateway2: {
@@ -127,6 +128,27 @@ export const transfer = {
127
128
  smartFormatting: deepgramSmartFormatting !== null && deepgramSmartFormatting !== void 0 ? deepgramSmartFormatting : false
128
129
  };
129
130
  }
131
+ if (recognizer.vendor === 'deepgramflux') {
132
+ recognizer.deepgramfluxOptions = {
133
+ endpointing: deepgramfluxEndpointing || true,
134
+ endOfTurnThreshold: deepgramfluxEndOfTurnThreshold !== null && deepgramfluxEndOfTurnThreshold !== void 0 ? deepgramfluxEndOfTurnThreshold : 0.7,
135
+ endOfTurnTimeoutMs: deepgramfluxEndOfTurnTimeoutMs !== null && deepgramfluxEndOfTurnTimeoutMs !== void 0 ? deepgramfluxEndOfTurnTimeoutMs : 5000
136
+ };
137
+ }
138
+ if (recognizer.vendor === 'speechmatics') {
139
+ recognizer.speechmaticsOptions = {
140
+ transcription_config: {},
141
+ };
142
+ if (speechmaticsEndpointing) {
143
+ recognizer.speechmaticsOptions.endpointing = speechmaticsEndpointingValue !== null && speechmaticsEndpointingValue !== void 0 ? speechmaticsEndpointingValue : 500;
144
+ }
145
+ }
146
+ if (recognizer.vendor === 'openai') {
147
+ recognizer.openaiOptions = Object.assign({}, ((_a = recognizer.openaiOptions) !== null && _a !== void 0 ? _a : {}));
148
+ if (openaiEndpointing) {
149
+ recognizer.openaiOptions.endpointing = openaiEndpointingValue !== null && openaiEndpointingValue !== void 0 ? openaiEndpointingValue : 500;
150
+ }
151
+ }
130
152
  if (sttLabel) {
131
153
  recognizer.label = sttLabel;
132
154
  }
@@ -125,6 +125,64 @@ export const voiceConfigFields = [
125
125
  value: "deepgram"
126
126
  }
127
127
  },
128
+ {
129
+ key: "deepgramfluxEndpointing",
130
+ type: "toggle",
131
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
132
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
133
+ defaultValue: true,
134
+ condition: {
135
+ key: "sttVendor",
136
+ value: "deepgramflux"
137
+ }
138
+ },
139
+ {
140
+ key: "deepgramfluxEndOfTurnThreshold",
141
+ type: "slider",
142
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
143
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
144
+ defaultValue: 0.7,
145
+ params: {
146
+ min: 0.5,
147
+ max: 0.9,
148
+ step: 0.1
149
+ },
150
+ condition: {
151
+ and: [
152
+ {
153
+ key: "sttVendor",
154
+ value: "deepgramflux"
155
+ },
156
+ {
157
+ key: "deepgramfluxEndpointing",
158
+ value: true
159
+ },
160
+ ]
161
+ }
162
+ },
163
+ {
164
+ key: "deepgramfluxEndOfTurnTimeoutMs",
165
+ type: "number",
166
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
167
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
168
+ defaultValue: 5000,
169
+ params: {
170
+ min: 500,
171
+ max: 10000
172
+ },
173
+ condition: {
174
+ and: [
175
+ {
176
+ key: "sttVendor",
177
+ value: "deepgramflux"
178
+ },
179
+ {
180
+ key: "deepgramfluxEndpointing",
181
+ value: true
182
+ },
183
+ ]
184
+ }
185
+ },
128
186
  {
129
187
  key: "enableAdvancedSTTConfig",
130
188
  type: "toggle",
@@ -303,6 +361,13 @@ export const SESSION_SPEECH_PARAMETERS = createNodeDescriptor({
303
361
  "deepgramEndpointing",
304
362
  "deepgramEndpointingValue",
305
363
  "deepgramSmartFormatting",
364
+ "deepgramfluxEndpointing",
365
+ "deepgramfluxEndOfTurnThreshold",
366
+ "deepgramfluxEndOfTurnTimeoutMs",
367
+ "speechmaticsEndpointing",
368
+ "speechmaticsEndpointingValue",
369
+ "openaiEndpointing",
370
+ "openaiEndpointingValue",
306
371
  "sttHints",
307
372
  "sttHintsDynamicHints",
308
373
  "googleModel",
@@ -71,6 +71,13 @@ export const playNode = createNodeDescriptor({
71
71
  "deepgramEndpointing",
72
72
  "deepgramEndpointingValue",
73
73
  "deepgramSmartFormatting",
74
+ "deepgramfluxEndpointing",
75
+ "deepgramfluxEndOfTurnThreshold",
76
+ "deepgramfluxEndOfTurnTimeoutMs",
77
+ "speechmaticsEndpointing",
78
+ "speechmaticsEndpointingValue",
79
+ "openaiEndpointing",
80
+ "openaiEndpointingValue",
74
81
  "sttDisablePunctuation",
75
82
  "sttVadEnabled",
76
83
  "sttVadMode",
@@ -79,6 +79,132 @@ export const voiceConfigFields = [
79
79
  value: "deepgram"
80
80
  }
81
81
  },
82
+ {
83
+ key: "deepgramfluxEndpointing",
84
+ type: "toggle",
85
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
86
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
87
+ defaultValue: true,
88
+ condition: {
89
+ key: "sttVendor",
90
+ value: "deepgramflux"
91
+ }
92
+ },
93
+ {
94
+ key: "deepgramfluxEndOfTurnThreshold",
95
+ type: "slider",
96
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
97
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
98
+ defaultValue: 0.7,
99
+ params: {
100
+ min: 0.5,
101
+ max: 0.9,
102
+ step: 0.1
103
+ },
104
+ condition: {
105
+ and: [
106
+ {
107
+ key: "sttVendor",
108
+ value: "deepgramflux"
109
+ },
110
+ {
111
+ key: "deepgramfluxEndpointing",
112
+ value: true
113
+ },
114
+ ]
115
+ }
116
+ },
117
+ {
118
+ key: "deepgramfluxEndOfTurnTimeoutMs",
119
+ type: "number",
120
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
121
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
122
+ defaultValue: 5000,
123
+ params: {
124
+ min: 500,
125
+ max: 10000
126
+ },
127
+ condition: {
128
+ and: [
129
+ {
130
+ key: "sttVendor",
131
+ value: "deepgramflux"
132
+ },
133
+ {
134
+ key: "deepgramfluxEndpointing",
135
+ value: true
136
+ },
137
+ ]
138
+ }
139
+ },
140
+ {
141
+ key: "speechmaticsEndpointing",
142
+ type: "toggle",
143
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__LABEL",
144
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__DESCRIPTION",
145
+ defaultValue: true,
146
+ condition: {
147
+ key: "sttVendor",
148
+ value: "speechmatics"
149
+ }
150
+ },
151
+ {
152
+ key: "speechmaticsEndpointingValue",
153
+ type: "number",
154
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__LABEL",
155
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__DESCRIPTION",
156
+ defaultValue: 500,
157
+ params: {
158
+ min: 10,
159
+ max: 5000
160
+ },
161
+ condition: {
162
+ and: [
163
+ {
164
+ key: "sttVendor",
165
+ value: "speechmatics"
166
+ },
167
+ {
168
+ key: "speechmaticsEndpointing",
169
+ value: true
170
+ },
171
+ ]
172
+ }
173
+ },
174
+ {
175
+ key: "openaiEndpointing",
176
+ type: "toggle",
177
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__LABEL",
178
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__DESCRIPTION",
179
+ defaultValue: true,
180
+ condition: {
181
+ key: "sttVendor",
182
+ value: "openai"
183
+ }
184
+ },
185
+ {
186
+ key: "openaiEndpointingValue",
187
+ type: "number",
188
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__LABEL",
189
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__DESCRIPTION",
190
+ defaultValue: 500,
191
+ params: {
192
+ min: 10,
193
+ max: 1000
194
+ },
195
+ condition: {
196
+ and: [
197
+ {
198
+ key: "sttVendor",
199
+ value: "openai"
200
+ },
201
+ {
202
+ key: "openaiEndpointing",
203
+ value: true
204
+ },
205
+ ]
206
+ }
207
+ },
82
208
  {
83
209
  key: "sttHints",
84
210
  type: "textArray",
@@ -107,6 +233,10 @@ export const voiceConfigFields = [
107
233
  key: "sttVendor",
108
234
  value: "deepgram"
109
235
  },
236
+ {
237
+ key: "sttVendor",
238
+ value: "deepgramflux"
239
+ },
110
240
  ]
111
241
  }
112
242
  },
@@ -980,6 +1110,13 @@ export const setSessionConfigNode = createNodeDescriptor({
980
1110
  "deepgramEndpointing",
981
1111
  "deepgramEndpointingValue",
982
1112
  "deepgramSmartFormatting",
1113
+ "deepgramfluxEndpointing",
1114
+ "deepgramfluxEndOfTurnThreshold",
1115
+ "deepgramfluxEndOfTurnTimeoutMs",
1116
+ "speechmaticsEndpointing",
1117
+ "speechmaticsEndpointingValue",
1118
+ "openaiEndpointing",
1119
+ "openaiEndpointingValue",
983
1120
  "sttHints",
984
1121
  "sttHintsDynamicHints",
985
1122
  "googleModel",
@@ -1102,7 +1239,6 @@ export const setSessionConfigNode = createNodeDescriptor({
1102
1239
  try {
1103
1240
  const voiceSettings = voiceConfigParamsToVoiceSettings(config, api);
1104
1241
  const payload = setSessionConfig.handleVGInput(voiceSettings, sessionParams, api);
1105
- api.log("error", JSON.stringify(payload));
1106
1242
  yield api.say(null, {
1107
1243
  _cognigy: payload,
1108
1244
  });
@@ -402,6 +402,132 @@ export const transferNode = createNodeDescriptor({
402
402
  value: "deepgram"
403
403
  }
404
404
  },
405
+ {
406
+ key: "deepgramfluxEndpointing",
407
+ type: "toggle",
408
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__LABEL",
409
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_ENDPOINTING__DESCRIPTION",
410
+ defaultValue: false,
411
+ condition: {
412
+ key: "sttVendor",
413
+ value: "deepgramflux"
414
+ }
415
+ },
416
+ {
417
+ key: "deepgramfluxEndOfTurnThreshold",
418
+ type: "slider",
419
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__LABEL",
420
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_THRESHOLD__DESCRIPTION",
421
+ defaultValue: 0.7,
422
+ params: {
423
+ min: 0.5,
424
+ max: 0.9,
425
+ step: 0.1
426
+ },
427
+ condition: {
428
+ and: [
429
+ {
430
+ key: "sttVendor",
431
+ value: "deepgramflux"
432
+ },
433
+ {
434
+ key: "deepgramfluxEndpointing",
435
+ value: true
436
+ },
437
+ ]
438
+ }
439
+ },
440
+ {
441
+ key: "deepgramfluxEndOfTurnTimeoutMs",
442
+ type: "number",
443
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__LABEL",
444
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__DEEPGRAMFLUX_END_OF_TURN_TIMEOUT_MS__DESCRIPTION",
445
+ defaultValue: 5000,
446
+ params: {
447
+ min: 500,
448
+ max: 10000
449
+ },
450
+ condition: {
451
+ and: [
452
+ {
453
+ key: "sttVendor",
454
+ value: "deepgramflux"
455
+ },
456
+ {
457
+ key: "deepgramfluxEndpointing",
458
+ value: true
459
+ },
460
+ ]
461
+ }
462
+ },
463
+ {
464
+ key: "speechmaticsEndpointing",
465
+ type: "toggle",
466
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__LABEL",
467
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING__DESCRIPTION",
468
+ defaultValue: true,
469
+ condition: {
470
+ key: "sttVendor",
471
+ value: "speechmatics"
472
+ }
473
+ },
474
+ {
475
+ key: "speechmaticsEndpointingValue",
476
+ type: "number",
477
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__LABEL",
478
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__SPEECHMATICS_ENDPOINTING_TIME__DESCRIPTION",
479
+ defaultValue: 500,
480
+ params: {
481
+ min: 10,
482
+ max: 5000
483
+ },
484
+ condition: {
485
+ and: [
486
+ {
487
+ key: "sttVendor",
488
+ value: "speechmatics"
489
+ },
490
+ {
491
+ key: "speechmaticsEndpointing",
492
+ value: true
493
+ },
494
+ ]
495
+ }
496
+ },
497
+ {
498
+ key: "openaiEndpointing",
499
+ type: "toggle",
500
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__LABEL",
501
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING__DESCRIPTION",
502
+ defaultValue: true,
503
+ condition: {
504
+ key: "sttVendor",
505
+ value: "openai"
506
+ }
507
+ },
508
+ {
509
+ key: "openaiEndpointingValue",
510
+ type: "number",
511
+ label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__LABEL",
512
+ description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__OPENAI_ENDPOINTING_TIME__DESCRIPTION",
513
+ defaultValue: 500,
514
+ params: {
515
+ min: 10,
516
+ max: 1000
517
+ },
518
+ condition: {
519
+ and: [
520
+ {
521
+ key: "sttVendor",
522
+ value: "openai"
523
+ },
524
+ {
525
+ key: "openaiEndpointing",
526
+ value: true
527
+ },
528
+ ]
529
+ }
530
+ },
405
531
  {
406
532
  key: "googleModel",
407
533
  type: "select",
@@ -462,6 +588,13 @@ export const transferNode = createNodeDescriptor({
462
588
  "deepgramEndpointing",
463
589
  "deepgramEndpointingValue",
464
590
  "deepgramSmartFormatting",
591
+ "deepgramfluxEndpointing",
592
+ "deepgramfluxEndOfTurnThreshold",
593
+ "deepgramfluxEndOfTurnTimeoutMs",
594
+ "speechmaticsEndpointing",
595
+ "speechmaticsEndpointingValue",
596
+ "openaiEndpointing",
597
+ "openaiEndpointingValue",
465
598
  "googleModel",
466
599
  "dialTranscriptionWebhook",
467
600
  "recognitionChannel"
@@ -551,7 +684,7 @@ export const transferNode = createNodeDescriptor({
551
684
  summary: "UI__NODE_EDITOR__VOICEGATEWAY2__TRANSFER__SUMMARY",
552
685
  function: ({ cognigy, config, }) => __awaiter(void 0, void 0, void 0, function* () {
553
686
  const { api, input } = cognigy;
554
- const { transferType, transferTarget, referredBy, mediaPath, useTransferSipHeaders, transferSipHeaders = {}, transferReason, dialMusic, dialTranscriptionWebhook, dialCallerId, recognitionChannel, sttVendor, sttLanguage, sttDisablePunctuation, dialTimeout, enableTimeLimit, timeLimit, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, sttLabel, googleModel, sttModel, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, agentAssistEnabled, agentAssistHeadersKey = customHeaderDefaultValue, anchorMedia } = config;
687
+ const { transferType, transferTarget, referredBy, mediaPath, useTransferSipHeaders, transferSipHeaders = {}, transferReason, dialMusic, dialTranscriptionWebhook, dialCallerId, recognitionChannel, sttVendor, sttLanguage, sttDisablePunctuation, dialTimeout, enableTimeLimit, timeLimit, amdEnabled, amdRedirectOnMachineDetected, amdRedirectText, sttLabel, googleModel, sttModel, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, agentAssistEnabled, agentAssistHeadersKey = customHeaderDefaultValue, anchorMedia } = config;
555
688
  const transferParams = {
556
689
  transferType,
557
690
  transferReason,
@@ -625,7 +758,7 @@ export const transferNode = createNodeDescriptor({
625
758
  transferParams.useTransferSipHeaders = false;
626
759
  api.log("error", "Invalid JSON in Transfer SIP Headers");
627
760
  }
628
- const payload = transfer.handleInput("voiceGateway2", transferParams, false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, media, anchorMedia);
761
+ const payload = transfer.handleInput("voiceGateway2", transferParams, false, recognitionChannel, sttVendor, sttLanguage, googleModel, sttModel, sttDisablePunctuation, deepgramEndpointing, deepgramEndpointingValue, deepgramSmartFormatting, deepgramfluxEndpointing, deepgramfluxEndOfTurnThreshold, deepgramfluxEndOfTurnTimeoutMs, speechmaticsEndpointing, speechmaticsEndpointingValue, openaiEndpointing, openaiEndpointingValue, media, anchorMedia);
629
762
  yield api.say(null, {
630
763
  _cognigy: payload,
631
764
  });
@@ -3,9 +3,10 @@ export var ErrorCode;
3
3
  ErrorCode[ErrorCode["BAD_REQUEST"] = 400] = "BAD_REQUEST";
4
4
  ErrorCode[ErrorCode["UNAUTHORIZED_ERROR"] = 401] = "UNAUTHORIZED_ERROR";
5
5
  ErrorCode[ErrorCode["PAYMENT_REQUIRED_ERROR"] = 402] = "PAYMENT_REQUIRED_ERROR";
6
+ ErrorCode[ErrorCode["FORBIDDEN_ERROR"] = 403] = "FORBIDDEN_ERROR";
7
+ ErrorCode[ErrorCode["NOT_FOUND"] = 404] = "NOT_FOUND";
6
8
  ErrorCode[ErrorCode["PAYLOAD_TOO_LARGE_ERROR"] = 413] = "PAYLOAD_TOO_LARGE_ERROR";
7
9
  ErrorCode[ErrorCode["TOO_MANY_REQUESTS_ERROR"] = 429] = "TOO_MANY_REQUESTS_ERROR";
8
- ErrorCode[ErrorCode["FORBIDDEN_ERROR"] = 403] = "FORBIDDEN_ERROR";
9
10
  ErrorCode[ErrorCode["BAD_GATEWAY"] = 502] = "BAD_GATEWAY";
10
11
  ErrorCode[ErrorCode["SERVICE_UNAVAILABLE_ERROR"] = 503] = "SERVICE_UNAVAILABLE_ERROR";
11
12
  ErrorCode[ErrorCode["GATEWAY_TIMEOUT_ERROR"] = 504] = "GATEWAY_TIMEOUT_ERROR";
@@ -48,6 +48,7 @@ export const ErrorCollection = {
48
48
  [ErrorCode.METHOD_NOT_ALLOWED_ERROR]: MethodNotAllowedError,
49
49
  [ErrorCode.MISSING_ARGUMENT_ERROR]: MissingArgumentError,
50
50
  [ErrorCode.NETWORK_ERROR]: NetworkError,
51
+ [ErrorCode.NOT_FOUND]: ResourceNotFoundError,
51
52
  [ErrorCode.NOT_IMPLEMENTED_ERROR]: NotImplementedError,
52
53
  [ErrorCode.PAYLOAD_TOO_LARGE_ERROR]: PayloadTooLargeError,
53
54
  [ErrorCode.PROCESS_ERROR]: ProcessError,
@@ -48,7 +48,7 @@ export class BaseContext {
48
48
  const validObjectSize = validateObjectSize(merged, (process.env.MAX_MEMORY_OBJECT_SIZE && parseInt(process.env.MAX_MEMORY_OBJECT_SIZE)) || 64000);
49
49
  if (validObjectSize.valid === false) {
50
50
  const errorMessage = `Cannot add value to context for key ${prop.toString()}: exceeded maximum context size.`;
51
- logger.log('error', { traceId: `proxy-${this.traceId}` }, errorMessage, {
51
+ logger.log('error', { traceId: this.traceId }, errorMessage, {
52
52
  organisationId: this.organisationId,
53
53
  projectId: this.projectId
54
54
  });