@cognigy/rest-api-client 0.13.2 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/CHANGELOG.md +5 -1
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +15 -3
  3. package/build/shared/charts/descriptors/apps/setHtmlAppState.js +83 -13
  4. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnection.js +14 -0
  5. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +19 -0
  6. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/openAIProviderConnection.js +11 -0
  7. package/build/shared/charts/descriptors/index.js +8 -5
  8. package/build/shared/charts/descriptors/message/question/optionalQuestion.js +22 -6
  9. package/build/shared/charts/descriptors/message/question/question.js +193 -25
  10. package/build/shared/charts/descriptors/message/say.js +33 -2
  11. package/build/shared/charts/descriptors/service/completeText.js +316 -0
  12. package/build/shared/charts/descriptors/service/handoverV2.js +47 -1
  13. package/build/shared/charts/descriptors/service/index.js +3 -1
  14. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +40 -7
  15. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +93 -32
  16. package/build/shared/charts/descriptors/voice/nodes/play.js +8 -6
  17. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +90 -8
  18. package/build/shared/charts/descriptors/voice/nodes/transfer.js +2 -2
  19. package/build/shared/charts/descriptors/voice/utils/helper.js +11 -1
  20. package/build/shared/charts/descriptors/voicegateway/nodes/agentAssist.js +2 -2
  21. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +4 -1
  22. package/build/shared/charts/descriptors/voicegateway2/nodes/refer.js +3 -3
  23. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +125 -26
  24. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +91 -7
  25. package/build/shared/charts/descriptors/voicegateway2/utils/helper.js +18 -11
  26. package/build/shared/charts/descriptors/voicegateway2/utils/strip-nulls.js +4 -1
  27. package/build/shared/charts/helpers/generativeAI/getRephraseWithAIFields.js +100 -0
  28. package/build/shared/charts/helpers/generativeAI/rephraseSentenceWithAi.js +44 -0
  29. package/build/shared/constants.js +2 -1
  30. package/build/shared/interfaces/ai.js +16 -0
  31. package/build/shared/interfaces/analytics/IAnalyticsDataGoals.js +3 -0
  32. package/build/shared/interfaces/endpointInterface.js +1 -0
  33. package/build/shared/interfaces/filemanager/IRuntimeFile.js +30 -0
  34. package/build/shared/interfaces/filemanager/index.js +1 -0
  35. package/build/shared/interfaces/handover.js +39 -3
  36. package/build/shared/interfaces/messageAPI/endpoints.js +2 -0
  37. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  38. package/build/shared/interfaces/resources/IConnectionSchema.js +2 -1
  39. package/build/shared/interfaces/resources/IExtension.js +2 -1
  40. package/build/shared/interfaces/resources/IFlow.js +2 -1
  41. package/build/shared/interfaces/resources/ILexicon.js +15 -2
  42. package/build/shared/interfaces/resources/INodeDescriptorSet.js +1 -1
  43. package/build/shared/interfaces/resources/intent/IIntent.js +5 -2
  44. package/build/shared/interfaces/resources/intent/IIntentRelation.js +3 -1
  45. package/build/shared/interfaces/resources/settings/IAgentSettings.js +9 -4
  46. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +136 -0
  47. package/build/shared/interfaces/resources/settings/index.js +7 -1
  48. package/build/shared/interfaces/restAPI/resources/flow/v2.0/sentence/IGenerateSentencesRest_2_0.js +3 -0
  49. package/package.json +1 -1
  50. package/types/index.d.ts +569 -184
@@ -0,0 +1,316 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.COMPLETE_TEXT = void 0;
4
+ /* Custom modules */
5
+ const createNodeDescriptor_1 = require("../../createNodeDescriptor");
6
+ exports.COMPLETE_TEXT = (0, createNodeDescriptor_1.createNodeDescriptor)({
7
+ type: "completeText",
8
+ defaultLabel: {
9
+ default: "Complete Text",
10
+ },
11
+ summary: {
12
+ default: "Generates a new text based on a given prompt",
13
+ deDE: "Erzeugt einen neuen Text auf Grundlage eines vorhandenen Kontext"
14
+ },
15
+ fields: [
16
+ {
17
+ key: "prompt",
18
+ label: {
19
+ default: "Prompt",
20
+ deDE: "Kontext"
21
+ },
22
+ type: "cognigyText",
23
+ description: {
24
+ default: "The prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.",
25
+ deDE: "Die Eingabeaufforderung, für die Vervollständigungen generiert werden sollen, codiert als Zeichenfolge, Array von Zeichenfolgen, Array von Token oder Array von Token-Arrays."
26
+ },
27
+ params: {
28
+ required: true,
29
+ multiline: true
30
+ },
31
+ defaultValue: ""
32
+ },
33
+ {
34
+ key: "samplingMethod",
35
+ label: "Sampling Method",
36
+ type: "select",
37
+ defaultValue: "temperature",
38
+ params: {
39
+ options: [
40
+ {
41
+ label: "Temperature",
42
+ value: "temperature"
43
+ },
44
+ {
45
+ label: "Top Percentage",
46
+ value: "topP"
47
+ }
48
+ ]
49
+ }
50
+ },
51
+ {
52
+ key: "temperature",
53
+ label: {
54
+ default: "Temperature",
55
+ deDE: "Temperatur"
56
+ },
57
+ type: "slider",
58
+ description: {
59
+ default: "What sampling temperature to use. Higher values means the model will take more risks",
60
+ deDE: "Welche Probenahmetemperatur soll verwendet werden. Höhere Werte bedeuten, dass das Modell mehr Risiken eingeht"
61
+ },
62
+ defaultValue: 0.7,
63
+ params: {
64
+ min: 0,
65
+ max: 1,
66
+ step: 0.1
67
+ },
68
+ condition: {
69
+ key: "samplingMethod",
70
+ value: "temperature",
71
+ }
72
+ },
73
+ {
74
+ key: "topP",
75
+ label: {
76
+ default: "Top Percentage",
77
+ deDE: "Maximaler Prozentsatz"
78
+ },
79
+ type: "slider",
80
+ description: {
81
+ default: "An alternative to sampling with temperature, called nucleus sampling",
82
+ deDE: "Eine Alternative zum Sampling mit Temperatur, genannt Nucleus Sampling"
83
+ },
84
+ defaultValue: 1,
85
+ params: {
86
+ min: 0,
87
+ max: 1,
88
+ step: 0.1
89
+ },
90
+ condition: {
91
+ key: "samplingMethod",
92
+ value: "topP",
93
+ }
94
+ },
95
+ {
96
+ key: "maxTokens",
97
+ label: {
98
+ default: "Maximal Tokens",
99
+ deDE: "Maximale Zeichen"
100
+ },
101
+ type: "slider",
102
+ description: {
103
+ default: "The maximum number of tokens to generate in the completion",
104
+ deDE: "Die maximale Anzahl von Token, die beim Abschluss generiert werden sollen"
105
+ },
106
+ defaultValue: 100,
107
+ params: {
108
+ min: 1,
109
+ max: 4000,
110
+ step: 1
111
+ }
112
+ },
113
+ {
114
+ key: "frequencyPenalty",
115
+ label: {
116
+ default: "Frequency Penalty",
117
+ deDE: "Frequenzregelung"
118
+ },
119
+ type: "slider",
120
+ description: {
121
+ default: "Number between -2.0 and 2.0",
122
+ deDE: "Zahl zwischen -2,0 und 2,0"
123
+ },
124
+ defaultValue: 0,
125
+ params: {
126
+ min: -2,
127
+ max: 2,
128
+ step: 0.1
129
+ }
130
+ },
131
+ {
132
+ key: "presencePenalty",
133
+ label: {
134
+ default: "Presence Penalty",
135
+ deDE: "Gegenwartsregelung"
136
+ },
137
+ type: "slider",
138
+ description: {
139
+ default: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
140
+ deDE: "Zahl zwischen -2,0 und 2,0. Positive Werte bestrafen neue Token basierend darauf, ob sie bisher im Text erschienen sind, und erhöhen die Wahrscheinlichkeit, dass das Modell über neue Themen spricht"
141
+ },
142
+ defaultValue: 0,
143
+ params: {
144
+ min: -2,
145
+ max: 2,
146
+ step: 0.1
147
+ }
148
+ },
149
+ {
150
+ key: "useStop",
151
+ label: {
152
+ default: "Use Stops",
153
+ deDE: "Stopps verwenden"
154
+ },
155
+ type: "toggle",
156
+ description: {
157
+ default: "Whether to use a list of stop words to let the Generative AI know where the sentence stops",
158
+ deDE: "Ob eine Liste von Stoppwörtern verwendet werden soll, um der Generativen AI mitzuteilen, wo der Satz endet"
159
+ },
160
+ defaultValue: false
161
+ },
162
+ {
163
+ key: "stop",
164
+ label: {
165
+ default: "Stops",
166
+ deDE: "Stopps"
167
+ },
168
+ type: "textArray",
169
+ description: {
170
+ default: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence",
171
+ deDE: "Bis zu 4 Sequenzen, bei denen die API aufhört, weitere Token zu generieren. Der zurückgegebene Text enthält nicht die Stoppsequenz"
172
+ },
173
+ condition: {
174
+ key: "useStop",
175
+ value: true
176
+ }
177
+ },
178
+ {
179
+ key: "timeout",
180
+ label: {
181
+ default: "Timeout",
182
+ },
183
+ defaultValue: 5000,
184
+ type: "number",
185
+ description: {
186
+ default: "The maximum amount of milliseconds to wait for a response from the Generative AI Provider",
187
+ },
188
+ },
189
+ {
190
+ key: "storeLocation",
191
+ type: "select",
192
+ label: {
193
+ default: "Where to store the result",
194
+ deDE: "Wo das Ergebnis gespeichert werden soll"
195
+ },
196
+ defaultValue: "input",
197
+ params: {
198
+ options: [
199
+ {
200
+ label: "Input",
201
+ value: "input"
202
+ },
203
+ {
204
+ label: "Context",
205
+ value: "context"
206
+ }
207
+ ],
208
+ required: true
209
+ },
210
+ },
211
+ {
212
+ key: "inputKey",
213
+ type: "cognigyText",
214
+ label: {
215
+ default: "Input Key to store Result",
216
+ deDE: "Input Key zum Speichern des Ergebnisses"
217
+ },
218
+ defaultValue: "generativeAi",
219
+ condition: {
220
+ key: "storeLocation",
221
+ value: "input",
222
+ }
223
+ },
224
+ {
225
+ key: "contextKey",
226
+ type: "cognigyText",
227
+ label: {
228
+ default: "Context Key to store Result",
229
+ deDE: "Context Key zum Speichern des Ergebnisses"
230
+ },
231
+ defaultValue: "generativeAi",
232
+ condition: {
233
+ key: "storeLocation",
234
+ value: "context",
235
+ }
236
+ },
237
+ ],
238
+ sections: [
239
+ {
240
+ key: "advanced",
241
+ label: {
242
+ default: "Advanced",
243
+ deDE: "Erweitert"
244
+ },
245
+ defaultCollapsed: true,
246
+ fields: [
247
+ "model",
248
+ "samplingMethod",
249
+ "temperature",
250
+ "topP",
251
+ "maxTokens",
252
+ "presencePenalty",
253
+ "frequencyPenalty",
254
+ "useStop",
255
+ "stop",
256
+ "timeout",
257
+ ]
258
+ },
259
+ {
260
+ key: "storage",
261
+ label: {
262
+ default: "Storage Option",
263
+ deDE: "Speicheroption"
264
+ },
265
+ defaultCollapsed: true,
266
+ fields: [
267
+ "storeLocation",
268
+ "inputKey",
269
+ "contextKey",
270
+ ]
271
+ }
272
+ ],
273
+ form: [
274
+ { type: "field", key: "prompt" },
275
+ { type: "section", key: "advanced" },
276
+ { type: "section", key: "storage" },
277
+ ],
278
+ appearance: {},
279
+ tags: ["service"],
280
+ function: async ({ cognigy, config }) => {
281
+ const { api } = cognigy;
282
+ const { prompt, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, } = config;
283
+ try {
284
+ const data = {
285
+ prompt,
286
+ temperature,
287
+ maxTokens,
288
+ topP,
289
+ presencePenalty,
290
+ frequencyPenalty,
291
+ timeoutInMs: timeout,
292
+ };
293
+ if (useStop) {
294
+ data["stop"] = stop;
295
+ }
296
+ const response = await api.runGenerativeAIPrompt(data);
297
+ if (storeLocation === "context") {
298
+ api.addToContext(contextKey, response, "simple");
299
+ }
300
+ else {
301
+ // @ts-ignore
302
+ api.addToInput(inputKey, response);
303
+ }
304
+ }
305
+ catch (error) {
306
+ if (storeLocation === "context") {
307
+ api.addToContext(contextKey, { error: error }, "simple");
308
+ }
309
+ else {
310
+ // @ts-ignore
311
+ api.addToInput(inputKey, { error: error });
312
+ }
313
+ }
314
+ }
315
+ });
316
+ //# sourceMappingURL=completeText.js.map
@@ -57,6 +57,13 @@ exports.HANDOVER_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
57
57
  ],
58
58
  },
59
59
  },
60
+ {
61
+ key: "additionalCategoryIds",
62
+ type: "cognigyTextArray",
63
+ label: "Additional Category Ids",
64
+ description: "Additional Category Ids to be passed to Ring Central Engage handover provider",
65
+ defaultValue: []
66
+ },
60
67
  {
61
68
  key: "sendResolveEvent",
62
69
  type: "toggle",
@@ -109,6 +116,25 @@ exports.HANDOVER_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
109
116
  description: "The details that should be displayed to the Live Chat Agent.",
110
117
  defaultValue: "[]",
111
118
  },
119
+ {
120
+ key: "eightByEightChannelId",
121
+ type: "cognigyText",
122
+ label: "8x8 Channel Id",
123
+ description: "The 8x8 Channel Id that should be used.",
124
+ },
125
+ {
126
+ key: "eightByEightQueueId",
127
+ type: "cognigyText",
128
+ label: "8x8 Queue Id",
129
+ description: "The 8x8 Queue Id that should be used.",
130
+ },
131
+ {
132
+ key: "eightByEightJSONProps",
133
+ type: "json",
134
+ label: "8x8 JSON properties",
135
+ description: "Extend 8x8 properties with custom JSON.",
136
+ defaultValue: [],
137
+ },
112
138
  ],
113
139
  sections: [
114
140
  {
@@ -156,6 +182,24 @@ exports.HANDOVER_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
156
182
  "salesforcePrechatEntities",
157
183
  "salesforcePrechatDetails"
158
184
  ]
185
+ },
186
+ {
187
+ key: "eightByEightSettings",
188
+ label: "8x8 Settings",
189
+ defaultCollapsed: true,
190
+ fields: [
191
+ "eightByEightChannelId",
192
+ "eightByEightQueueId",
193
+ "eightByEightJSONProps"
194
+ ]
195
+ },
196
+ {
197
+ key: "ringCentralEngageSettings",
198
+ label: "Ring Central Engage Settings",
199
+ defaultCollapsed: true,
200
+ fields: [
201
+ "additionalCategoryIds",
202
+ ]
159
203
  }
160
204
  ],
161
205
  form: [
@@ -169,7 +213,9 @@ exports.HANDOVER_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
169
213
  key: "liveAgentSettings"
170
214
  },
171
215
  { type: "section", key: "chatwootSettings" },
172
- { type: "section", key: "salesforceSettings" }
216
+ { type: "section", key: "salesforceSettings" },
217
+ { type: "section", key: "eightByEightSettings" },
218
+ { type: "section", key: "ringCentralEngageSettings" },
173
219
  ]
174
220
  .filter(element => !!element),
175
221
  tags: ["service", "livechat", "chat", "hand over"],
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ON_SCHEDULING_ERROR = exports.ON_SCHEDULED = exports.TRIGGER_FUNCTION = exports.HTTP_CONNECTION_OAUTH2 = exports.HTTP_CONNECTION_APIKEYXKEY = exports.HTTP_CONNECTION_APIKEYAUTHKEY = exports.HTTP_CONNECTION_BASIC = exports.HTTP_REQUEST = exports.CHECK_AGENT_AVAILABILITY = exports.HANDOVER_V2 = exports.HANDOVER = void 0;
3
+ exports.COMPLETE_TEXT = exports.ON_SCHEDULING_ERROR = exports.ON_SCHEDULED = exports.TRIGGER_FUNCTION = exports.HTTP_CONNECTION_OAUTH2 = exports.HTTP_CONNECTION_APIKEYXKEY = exports.HTTP_CONNECTION_APIKEYAUTHKEY = exports.HTTP_CONNECTION_BASIC = exports.HTTP_REQUEST = exports.CHECK_AGENT_AVAILABILITY = exports.HANDOVER_V2 = exports.HANDOVER = void 0;
4
4
  var handover_1 = require("./handover");
5
5
  Object.defineProperty(exports, "HANDOVER", { enumerable: true, get: function () { return handover_1.HANDOVER; } });
6
6
  var handoverV2_1 = require("./handoverV2");
@@ -21,4 +21,6 @@ var onScheduled_1 = require("./triggerFunction/onScheduled");
21
21
  Object.defineProperty(exports, "ON_SCHEDULED", { enumerable: true, get: function () { return onScheduled_1.ON_SCHEDULED; } });
22
22
  var onSchedulingError_1 = require("./triggerFunction/onSchedulingError");
23
23
  Object.defineProperty(exports, "ON_SCHEDULING_ERROR", { enumerable: true, get: function () { return onSchedulingError_1.ON_SCHEDULING_ERROR; } });
24
+ var completeText_1 = require("./completeText");
25
+ Object.defineProperty(exports, "COMPLETE_TEXT", { enumerable: true, get: function () { return completeText_1.COMPLETE_TEXT; } });
24
26
  //# sourceMappingURL=index.js.map
@@ -36,7 +36,10 @@ function voiceConfigParamsToVoiceSettings(config, api) {
36
36
  ttsVendor: config.ttsVendor,
37
37
  ttsLanguage: config.ttsLanguage,
38
38
  ttsVoice: config.ttsVoice,
39
+ azureServiceEndpoint: config.azureTtsDeploymentId
39
40
  };
41
+ if (config.ttsVendor === 'microsoft' && config.azureTtsDeploymentId)
42
+ voiceSettings.tts.azureServiceEndpoint = config.azureTtsDeploymentId;
40
43
  // userNoInput
41
44
  voiceSettings.userNoInput = {
42
45
  userNoInputMode: config.userNoInputMode,
@@ -85,6 +88,19 @@ function voiceConfigParamsToVoiceSettings(config, api) {
85
88
  delete voiceSettings.continuousAsr.asrDigit;
86
89
  }
87
90
  }
91
+ // Azure configurations
92
+ if ((config.sttVendor === "microsoft" && config.azureSttContextId) ||
93
+ (config.sttVendor === "microsoft" && config.azureTtsDeploymentId)) {
94
+ voiceSettings.azureConfig = {
95
+ azureSttContextId: config.azureSttContextId
96
+ ? config.azureSttContextId.trim()
97
+ : undefined,
98
+ azureEnableAudioLogging: config.azureEnableAudioLogging || undefined,
99
+ azureTtsDeploymentId: config.azureTtsDeploymentId
100
+ ? config.azureTtsDeploymentId.trim()
101
+ : undefined,
102
+ };
103
+ }
88
104
  return (0, strip_nulls_1.stripNulls)(voiceSettings);
89
105
  }
90
106
  exports.voiceConfigParamsToVoiceSettings = voiceConfigParamsToVoiceSettings;
@@ -114,7 +130,7 @@ exports.setSessionConfig = {
114
130
  },
115
131
  handleVGInput(voiceSettings) {
116
132
  var _a;
117
- const { bargeIn, continuousAsr, stt, tts, userNoInput, dtmf, vad } = voiceSettings;
133
+ const { bargeIn, continuousAsr, stt, tts, userNoInput, dtmf, vad, azureConfig, } = voiceSettings;
118
134
  const user = {};
119
135
  const synthesizer = {};
120
136
  const recognizer = {};
@@ -128,6 +144,8 @@ exports.setSessionConfig = {
128
144
  }
129
145
  recognizer.vendor = (stt === null || stt === void 0 ? void 0 : stt.sttVendor) ? stt.sttVendor : "default";
130
146
  recognizer.punctuation = stt === null || stt === void 0 ? void 0 : stt.sttDisablePunctuation;
147
+ recognizer.azureSttEndpointId = azureConfig === null || azureConfig === void 0 ? void 0 : azureConfig.azureSttContextId;
148
+ recognizer.audioLogging = azureConfig === null || azureConfig === void 0 ? void 0 : azureConfig.azureEnableAudioLogging;
131
149
  if (vad) {
132
150
  recognizer.vad = { enable: vad.enable };
133
151
  if (vad.enable) {
@@ -140,6 +158,7 @@ exports.setSessionConfig = {
140
158
  synthesizer.vendor = tts.ttsVendor;
141
159
  synthesizer.language = tts.ttsLanguage;
142
160
  synthesizer.voice = tts.ttsVoice;
161
+ synthesizer.azureServiceEndpoint = tts.azureServiceEndpoint ? tts.azureServiceEndpoint : "";
143
162
  }
144
163
  //verify if there are No User Input Configs to set
145
164
  if (userNoInput) {
@@ -166,8 +185,8 @@ exports.setSessionConfig = {
166
185
  barge.enable = bargeIn.bargeInEnable;
167
186
  /* Barge in minimum words. min: 1, max: 5, default: 1 */
168
187
  bargeIn.bargeInMinimunWords >= 1 && bargeIn.bargeInMinimunWords <= 5
169
- ? barge.minBargeinWordCount = bargeIn.bargeInMinimunWords
170
- : barge.minBargeinWordCount = 1;
188
+ ? (barge.minBargeinWordCount = bargeIn.bargeInMinimunWords)
189
+ : (barge.minBargeinWordCount = 1);
171
190
  barge.input = [];
172
191
  barge.actionHook = "voice";
173
192
  barge.dtmfBargein = bargeIn.bargeInOnDtmf;
@@ -192,9 +211,8 @@ exports.setSessionConfig = {
192
211
  : dtmf.dtmfInterDigitTimeout / 1000
193
212
  : undefined;
194
213
  /* Max digits. min: 1, max: - default: 1 */
195
- barge.maxDigits = dtmf.dtmfMaxDigits >= 1
196
- ? barge.maxDigits = dtmf.dtmfMaxDigits
197
- : 1;
214
+ barge.maxDigits =
215
+ dtmf.dtmfMaxDigits >= 1 ? (barge.maxDigits = dtmf.dtmfMaxDigits) : 1;
198
216
  barge.minDigits = dtmf.dtmfMinDigits || 1;
199
217
  barge.finishOnKey = dtmf.dtmfSubmitDigit;
200
218
  }
@@ -206,6 +224,10 @@ exports.setSessionConfig = {
206
224
  bargeIn: barge || undefined,
207
225
  };
208
226
  (0, strip_nulls_1.stripNulls)(voiceConfig);
227
+ if (voiceConfig.synthesizer && !voiceConfig.synthesizer.azureServiceEndpoint)
228
+ voiceConfig.synthesizer.azureServiceEndpoint = "";
229
+ if (voiceConfig.recognizer && !voiceConfig.recognizer.azureSttEndpointId)
230
+ voiceConfig.recognizer.azureSttEndpointId = "";
209
231
  // Check if there are configs for No User Input and DTMF
210
232
  const cognigyConfig = {
211
233
  user: user ? user : undefined,
@@ -228,7 +250,7 @@ exports.setSessionConfig = {
228
250
  return payload;
229
251
  },
230
252
  handleAudioCodesInput(voiceSettings) {
231
- const { bargeIn, continuousAsr, stt, tts, userNoInput, dtmf } = voiceSettings;
253
+ const { bargeIn, continuousAsr, stt, tts, userNoInput, dtmf, azureConfig } = voiceSettings;
232
254
  const sessionParams = {};
233
255
  if (stt) {
234
256
  sessionParams.sttLanguage = stt.sttLanguage;
@@ -244,6 +266,10 @@ exports.setSessionConfig = {
244
266
  if (tts) {
245
267
  sessionParams.ttsLanguage = tts.ttsLanguage;
246
268
  sessionParams.voiceName = tts.ttsVoice;
269
+ if (tts.azureServiceEndpoint) {
270
+ sessionParams.ttsDeploymentId = tts.azureServiceEndpoint;
271
+ }
272
+ ;
247
273
  }
248
274
  if (dtmf) {
249
275
  sessionParams.sendDTMF = dtmf.dtmfEnable;
@@ -281,6 +307,13 @@ exports.setSessionConfig = {
281
307
  sessionParams.userNoInputRetries > 0)
282
308
  sessionParams.userNoInputUrl = userNoInput.userNoInputUrl;
283
309
  }
310
+ if (azureConfig === null || azureConfig === void 0 ? void 0 : azureConfig.azureSttContextId) {
311
+ sessionParams.sttContextId = azureConfig.azureSttContextId;
312
+ if (azureConfig.azureEnableAudioLogging) {
313
+ sessionParams.azureEnableAudioLogging =
314
+ azureConfig.azureEnableAudioLogging;
315
+ }
316
+ }
284
317
  const activities = {
285
318
  type: "event",
286
319
  name: "config",
@@ -1,52 +1,108 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.transfer = void 0;
3
+ exports.prepareTransferParams = exports.transfer = void 0;
4
+ /* Node modules */
5
+ const path_1 = require("path");
6
+ /* Custom modules */
4
7
  const constants_1 = require("../utils/constants");
8
+ const helper_1 = require("../../../descriptors/voicegateway2/utils/helper");
9
+ const helper_2 = require("../utils/helper");
5
10
  exports.transfer = {
6
11
  handleInput(endpointType, params, isGenericNode) {
7
12
  try {
8
- if (endpointType === constants_1.ENDPOINTS.AUDIO_CODES && isGenericNode && params.transferSipHeaders) {
9
- const { transferReason, transferTarget, useTransferSipHeaders, transferSipHeaders } = params;
10
- let headers = [];
11
- for (const [key, value] of Object.entries(transferSipHeaders)) {
12
- const entry = { "name": key, "value": value };
13
- headers.push(entry);
14
- }
15
- ;
16
- const target = transferTarget.includes("@")
17
- ? transferTarget
18
- : `tel:${transferTarget}`;
19
- return this.handleAudioCodesInput({ transferReason, transferTarget: target, useTransferSipHeaders, transferSipHeaders: headers });
20
- }
21
- if (endpointType === constants_1.ENDPOINTS.VOICE_GATEWAY) {
22
- return this.handleVGInput(params);
23
- }
24
- else if (endpointType === constants_1.ENDPOINTS.AUDIO_CODES) {
25
- return this.handleAudioCodesInput(params);
26
- }
27
- else {
28
- return this.handleVGInput(params);
13
+ switch (endpointType) {
14
+ case constants_1.ENDPOINTS.AUDIO_CODES:
15
+ if (isGenericNode && params.transferSipHeaders) {
16
+ let headers = [];
17
+ for (const [key, value] of Object.entries(params.transferSipHeaders)) {
18
+ headers.push({
19
+ name: key,
20
+ value
21
+ });
22
+ }
23
+ ;
24
+ params.transferSipHeaders = headers;
25
+ }
26
+ return this.handleAudioCodesInput((0, exports.prepareTransferParams)(params));
27
+ case constants_1.ENDPOINTS.VOICE_GATEWAY:
28
+ default:
29
+ return this.handleVGInput((0, exports.prepareTransferParams)(params));
29
30
  }
30
- ;
31
31
  }
32
32
  catch (error) {
33
33
  throw Error(error.message);
34
34
  }
35
35
  },
36
36
  handleVGInput(transferParam) {
37
- const { transferTarget, useTransferSipHeaders, transferSipHeaders } = transferParam;
37
+ const { transferType, transferTarget, transferReason, useTransferSipHeaders, transferSipHeaders, dialMusic, dialTranscriptionWebhook, dialCallerId } = transferParam;
38
38
  const payload = {
39
39
  _voiceGateway2: {
40
- json: {
41
- "sip:refer": {
42
- "referTo": transferTarget,
43
- "actionHook": 'refer'
44
- }
45
- }
40
+ json: {}
46
41
  }
47
42
  };
43
+ let headers = {
44
+ "X-Reason": transferReason,
45
+ };
48
46
  if (useTransferSipHeaders && typeof transferSipHeaders === "object") {
49
- payload._voiceGateway2.json["sip:refer"]["headers"] = transferSipHeaders;
47
+ headers = Object.assign(Object.assign({}, headers), transferSipHeaders);
48
+ }
49
+ switch (transferType) {
50
+ case "dial":
51
+ const dialVerb = {
52
+ actionHook: "dial",
53
+ headers,
54
+ target: [],
55
+ };
56
+ const phoneTarget = {
57
+ type: "phone",
58
+ number: (0, helper_1.cleanTarget)(transferTarget, false),
59
+ };
60
+ const sipTarget = {
61
+ type: "sip",
62
+ sipUri: (0, helper_1.cleanTarget)(transferTarget, false),
63
+ };
64
+ /* By default we set the target to phone */
65
+ dialVerb.target = [phoneTarget];
66
+ /* If targets includes an @ we set the target to sip */
67
+ if (transferTarget === null || transferTarget === void 0 ? void 0 : transferTarget.includes("@")) {
68
+ dialVerb.target = [sipTarget];
69
+ }
70
+ if (dialTranscriptionWebhook === null || dialTranscriptionWebhook === void 0 ? void 0 : dialTranscriptionWebhook.length) {
71
+ if (!(0, helper_2.isValidUrl)(dialTranscriptionWebhook)) {
72
+ throw new Error(`Transcription webhook URL is invalid ${dialTranscriptionWebhook}`);
73
+ }
74
+ const transcribeVerb = {
75
+ transcriptionHook: dialTranscriptionWebhook,
76
+ };
77
+ dialVerb.transcribe = transcribeVerb;
78
+ }
79
+ if (dialMusic === null || dialMusic === void 0 ? void 0 : dialMusic.length) {
80
+ if (!(0, helper_2.isValidUrl)(dialMusic)) {
81
+ throw new Error(`Audio file URL is invalid ${dialMusic}`);
82
+ }
83
+ const extension = (0, path_1.extname)(dialMusic);
84
+ if (extension !== (".mp3" || ".wav")) {
85
+ throw new Error(`Audio file must be from type .mp3 or .wav. Extension was: ${extension}`);
86
+ }
87
+ dialVerb.dialMusic = dialMusic;
88
+ }
89
+ if (dialCallerId === null || dialCallerId === void 0 ? void 0 : dialCallerId.length) {
90
+ if (!(0, helper_2.isValidPhoneNumber)(dialCallerId)) {
91
+ throw new Error(`Caller ID must be a valid phone number ${dialCallerId}`);
92
+ }
93
+ dialVerb.callerId = dialCallerId;
94
+ }
95
+ payload._voiceGateway2.json["dial"] = dialVerb;
96
+ break;
97
+ case "refer":
98
+ default:
99
+ const referVerb = {
100
+ "referTo": (0, helper_1.cleanTarget)(transferTarget, false),
101
+ "actionHook": 'refer',
102
+ headers
103
+ };
104
+ payload._voiceGateway2.json["sip:refer"] = referVerb;
105
+ break;
50
106
  }
51
107
  return payload;
52
108
  },
@@ -56,7 +112,7 @@ exports.transfer = {
56
112
  "type": "event",
57
113
  "name": "transfer",
58
114
  "activityParams": {
59
- "transferTarget": transferTarget,
115
+ "transferTarget": (0, helper_1.cleanTarget)(transferTarget, true),
60
116
  "handoverReason": transferReason,
61
117
  }
62
118
  };
@@ -83,4 +139,9 @@ exports.transfer = {
83
139
  return payload;
84
140
  }
85
141
  };
142
+ const prepareTransferParams = (transferParam) => {
143
+ const { transferReason, dialMusic, dialTranscriptionWebhook } = transferParam;
144
+ return Object.assign(Object.assign({}, transferParam), { transferReason: transferReason === null || transferReason === void 0 ? void 0 : transferReason.trim(), dialMusic: dialMusic === null || dialMusic === void 0 ? void 0 : dialMusic.trim(), dialTranscriptionWebhook: dialTranscriptionWebhook === null || dialTranscriptionWebhook === void 0 ? void 0 : dialTranscriptionWebhook.trim() });
145
+ };
146
+ exports.prepareTransferParams = prepareTransferParams;
86
147
  //# sourceMappingURL=transfer.mapper.js.map