@onereach/step-voice 6.0.16 → 6.0.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dst/Choice.d.ts +1 -0
- package/dst/Choice.js +15 -12
- package/dst/Custom Voice Input.js +4 -2
- package/dst/Global Command.d.ts +0 -1
- package/dst/Global Command.js +3 -20
- package/dst/Keypad Input.js +4 -2
- package/dst/voice.d.ts +5 -2
- package/dst/voice.js +53 -21
- package/package.json +1 -1
package/dst/Choice.d.ts
CHANGED
package/dst/Choice.js
CHANGED
@@ -69,7 +69,7 @@ const isRepromptTrigger = (recogResult, promptsTriggers) => {
 class Choice extends voice_1.default {
     async runStep() {
         const call = await this.fetchData();
-        const { textType, asr, tts, sensitiveData, noReplyDelay, usePromptsTriggers, recognitionModel, useInterspeechTimeout, interSpeechTimeout } = this.data;
+        const { textType, asr, tts, sensitiveData, noReplyDelay, usePromptsTriggers, recognitionModel, useInterspeechTimeout, interSpeechTimeout, longRecognition } = this.data;
         const exitExists = (exitId) => {
             return lodash_1.default.some(choices, (choice) => choice.exitId === exitId);
         };
@@ -87,7 +87,9 @@ class Choice extends voice_1.default {
             name: 'speak',
             params: {
                 grammar,
-                dictation:
+                dictation: longRecognition
+                    ? 'continuous'
+                    : useInterspeechTimeout,
                 interSpeechTimeout: interSpeechTimeout * 1000,
                 sections: []
             }
@@ -111,7 +113,7 @@ class Choice extends voice_1.default {
                         actionFromBot: false
                     });
                     await this.resumeRecording(call, sensitiveData);
-                    return this.exitStep(exitId, this.exitChoiceData('dtmf', params));
+                    return this.exitStep(exitId, this.exitChoiceData('dtmf', params), longRecognition);
                 }
                 else if (this.rptsHasMore({ repromptsList })) {
                     await this.transcript(call, {
@@ -127,7 +129,8 @@ class Choice extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
@@ -139,9 +142,7 @@ class Choice extends voice_1.default {
                     actionFromBot: false
                 });
                 await this.resumeRecording(call, sensitiveData);
-                return this.exitStep('unrecognized', this.exitChoiceData('dtmf', {
-                    digit
-                }));
+                return this.exitStep('unrecognized', this.exitChoiceData('dtmf', { digit }), longRecognition);
             }
             case 'recognition': {
                 const params = event.params;
@@ -166,7 +167,7 @@ class Choice extends voice_1.default {
                     });
                     await this.resumeRecording(call, sensitiveData);
                     // There might be hooks after this step which we will try to avoid
-                    return this.exitStep(exitId, this.exitChoiceData('voice', params));
+                    return this.exitStep(exitId, this.exitChoiceData('voice', params), longRecognition);
                 }
                 else if (this.rptsHasMore({ repromptsList }) &&
                     (usePromptsTriggers ? isRepromptTrigger(phrases, this.data.promptsTriggers) : true)) {
@@ -183,7 +184,8 @@ class Choice extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
@@ -197,7 +199,7 @@ class Choice extends voice_1.default {
                 });
                 await this.resumeRecording(call, sensitiveData);
                 // We might end up in same session
-                return this.exitStep('unrecognized', this.exitChoiceData('voice', params));
+                return this.exitStep('unrecognized', this.exitChoiceData('voice', params), longRecognition);
             }
             case 'timeout': {
                 if (this.rptsHasMore({ repromptsList })) {
@@ -213,7 +215,8 @@ class Choice extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
@@ -225,7 +228,7 @@ class Choice extends voice_1.default {
                 });
                 await this.resumeRecording(call, sensitiveData);
                 // We might end up in same session
-                return this.exitStep('no reply', {});
+                return this.exitStep('no reply', {}, longRecognition);
             }
             case 'hangup': {
                 await this.handleHangup(call);
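For orientation: in the Choice.js changes above, the new longRecognition setting switches the speak command into continuous dictation and is also forwarded as the new third argument of exitStep, so long-recognition exits are routed through a thread (see voice.js later in this diff). Below is a minimal standalone sketch of the dictation wiring only; the SpeakSettings type and buildSpeakParams helper are illustrative and not part of the package.

// Illustrative sketch only: mirrors the speak-command params assembled in the Choice.js diff above.
interface SpeakSettings {
    grammar: unknown;
    longRecognition: boolean;
    useInterspeechTimeout: boolean;
    interSpeechTimeout: number; // seconds in step data, milliseconds on the wire
}

function buildSpeakParams({ grammar, longRecognition, useInterspeechTimeout, interSpeechTimeout }: SpeakSettings) {
    return {
        grammar,
        // longRecognition forces continuous dictation; otherwise dictation
        // follows the useInterspeechTimeout flag, as in the non-long-recognition path above
        dictation: longRecognition ? 'continuous' : useInterspeechTimeout,
        interSpeechTimeout: interSpeechTimeout * 1000,
        sections: [] as unknown[]
    };
}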
package/dst/Custom Voice Input.js
CHANGED
@@ -131,7 +131,8 @@ class CustomVoiceInput extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
@@ -161,7 +162,8 @@ class CustomVoiceInput extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
package/dst/Global Command.d.ts
CHANGED
@@ -37,7 +37,6 @@ export default class GlobalCommand extends VoiceStep<Partial<INPUT>, OUTPUT, EVE
     worker(): Promise<void>;
     hangup(call: IVoiceCall): Promise<unknown>;
     exitThread(event: ITypedEvent<EVENT>, type: string, stepExit: string): Promise<void>;
-    exitToThread(): void;
     buildGrammar(call: IVoiceCall, choices: TODO[]): Promise<any>;
     replyAck(eventParams: any): void;
 }
package/dst/Global Command.js
CHANGED
@@ -4,7 +4,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 const tslib_1 = require("tslib");
 const types_1 = require("@onereach/flow-sdk/dst/types");
 const lodash_1 = tslib_1.__importDefault(require("lodash"));
-const nanoid_1 = require("nanoid");
 const voice_1 = tslib_1.__importDefault(require("./voice"));
 class GlobalCommand extends voice_1.default {
     get isGlobal() {
@@ -119,6 +118,7 @@ class GlobalCommand extends voice_1.default {
                     reportingSettingsKey: 'transcript',
                     actionFromBot: false
                 });
+                await this.resumeRecording(call, { muteStep: true, muteUser: false, muteBot: false });
                 return await this.exitThread(event, 'digit', exitId);
             }
             else {
@@ -142,6 +142,7 @@ class GlobalCommand extends voice_1.default {
                     reportingSettingsKey: 'transcript',
                     actionFromBot: false
                 });
+                await this.resumeRecording(call, { muteStep: true, muteUser: false, muteBot: false });
                 return await this.exitThread(event, 'voice', exitId);
             }
             else {
@@ -212,7 +213,6 @@ class GlobalCommand extends voice_1.default {
         });
     }
     async exitThread(event, type, stepExit) {
-        this.log.debug('exitThread', type, stepExit);
         const params = event.params;
         const result = { type };
         if (!lodash_1.default.isEmpty(params.tags)) {
@@ -235,24 +235,7 @@ class GlobalCommand extends voice_1.default {
         if (!lodash_1.default.isEmpty(params.callRecording)) {
             result.callRecording = params.callRecording;
         }
-
-        await this.process.runThread({
-            id: `${exitLabel}_${(0, nanoid_1.nanoid)(8)}`,
-            state: {
-                name: this.exitToThread.name,
-                direct: true,
-                result: {
-                    conversation: this.conversation,
-                    conversationThreadId: this.dataThreadId,
-                    ...result
-                },
-                exitStep: stepExit,
-                step: this.currentStepId
-            }
-        });
-    }
-    exitToThread() {
-        this.thread.exitStep(this.state.exitStep, this.state.result);
+        await this.exitStepByThread(stepExit, result);
     }
     async buildGrammar(call, choices) {
         const { asr } = this.data;
package/dst/Keypad Input.js
CHANGED
@@ -102,7 +102,8 @@ class KeypadInput extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
@@ -131,7 +132,8 @@ class KeypadInput extends voice_1.default {
                         noReplyDelay,
                         speechSections,
                         textType,
-                        ttsSettings
+                        ttsSettings,
+                        sensitiveData
                     });
                     return this.exitFlow();
                 }
package/dst/voice.d.ts
CHANGED
@@ -8,6 +8,7 @@ export interface SensitiveData {
     muteBot: boolean;
 }
 export interface IVoiceCall extends IConversationData {
+    /** voicer protocol version */
     vv: number;
     id: string;
     ended: boolean;
@@ -51,7 +52,9 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
     handleZombie?: boolean;
 }, TOut, TParams> {
     runBefore(): Promise<void>;
-    exitStep(
+    exitStep(exitId: string, data?: any, byThread?: boolean): any;
+    exitStepByThread(exitId: string, result: any): any;
+    exitToThread(): void;
     sendCommands({ id, type, callback }: IVoiceCall, commands: TODO[]): Promise<void>;
     handleDataThreadStart(): void;
     handleDataThreadEnd(): void;
@@ -72,7 +75,7 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
     buildChoices({ choices }: TODO): TODO;
     buildChoice(choice: TODO): TODO;
     exitChoiceData(type: string, params?: TODO): TODO;
-    rptsSend(call: IVoiceCall, { command, reporting, repromptsList, noReplyDelay, speechSections, textType, ttsSettings }: TODO): Promise<void>;
+    rptsSend(call: IVoiceCall, { command, reporting, repromptsList, noReplyDelay, speechSections, textType, ttsSettings, sensitiveData }: TODO): Promise<void>;
     rptsTimeout({ noReplyDelay, repromptsList }: TODO): number;
     rptsRestart(): void;
     rptsHasMore({ repromptsList }: TODO): boolean;
package/dst/voice.js
CHANGED
@@ -4,6 +4,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.VoiceStepError = void 0;
 const tslib_1 = require("tslib");
 const lodash_1 = tslib_1.__importDefault(require("lodash"));
+const nanoid_1 = require("nanoid");
 const types_1 = require("@onereach/flow-sdk/dst/types");
 const uuid_1 = require("uuid");
 // TODO: !!!!! import ConvStep from '@onereach/step-conversation/dst/step' !!!!!
@@ -22,11 +23,34 @@ class VoiceStep extends step_1.default {
             await this.handleHeartbeat(this.cache);
         }
     }
-    exitStep(
+    exitStep(exitId, data, byThread = false) {
+        if (byThread)
+            return this.exitStepByThread(exitId, data);
         if (this.cache != null) {
             this.handleDataThreadEnd();
         }
-        return super.exitStep(
+        return super.exitStep(exitId, data);
+    }
+    exitStepByThread(exitId, result) {
+        this.log.debug('exitStepByThread', exitId, result);
+        const exitLabel = lodash_1.default.replace(this.getExitStepLabel(exitId) ?? exitId, /\W+/g, '');
+        return this.process.runThread({
+            id: `${exitLabel}_${(0, nanoid_1.nanoid)(8)}`,
+            state: {
+                name: this.exitToThread.name,
+                direct: true,
+                result: {
+                    conversation: this.conversation,
+                    conversationThreadId: this.dataThreadId,
+                    ...result
+                },
+                exitStep: exitId,
+                step: this.currentStepId
+            }
+        });
+    }
+    exitToThread() {
+        this.thread.exitStep(this.state.exitStep, this.state.result);
     }
     async sendCommands({ id, type, callback }, commands) {
         if (lodash_1.default.isEmpty(commands))
@@ -146,29 +170,34 @@ class VoiceStep extends step_1.default {
             .value();
     }
     async pauseRecording(call, command, sensitiveData) {
-        await this.sendCommands(call, [
-            ...call.recordCall && sensitiveData?.muteStep
-                ? [{
-                        name: 'stop-record-session',
-                        params: {
-                            muteUser: sensitiveData.muteUser,
-                            muteBot: sensitiveData.muteBot
-                        }
-                    }]
-                : [],
-            command
-        ]);
-    }
-    async resumeRecording(call, sensitiveData) {
+        const commands = [command];
         if (call.recordCall && sensitiveData?.muteStep) {
-            await this.sendCommands(call, [{
-                    name: 'resume-record-session',
+            if (call.vv >= 2) {
+                // newer voicer version automaically should stop/resume session recording
+                command.params.sensitiveData = sensitiveData;
+            }
+            else {
+                commands.unshift({
+                    name: 'stop-record-session',
                     params: {
                         muteUser: sensitiveData.muteUser,
                         muteBot: sensitiveData.muteBot
                     }
-                }]);
+                });
+            }
         }
+        await this.sendCommands(call, commands);
+    }
+    async resumeRecording(call, sensitiveData) {
+        if (!call.recordCall || !sensitiveData?.muteStep || call.vv >= 2)
+            return;
+        await this.sendCommands(call, [{
+                name: 'resume-record-session',
+                params: {
+                    muteUser: sensitiveData.muteUser,
+                    muteBot: sensitiveData.muteBot
+                }
+            }]);
     }
     async transcript(call, data = {}) {
         const { previousTranscriptId = call.lastTranscriptId, action, keyPress, message, voiceProcessResult, reportingSettingsKey, sections, reprompt, recording, conferenceId, actionFromBot = false } = data;
@@ -307,7 +336,7 @@ class VoiceStep extends step_1.default {
         return data;
     }
     ;
-    async rptsSend(call, { command, reporting, repromptsList, noReplyDelay, speechSections, textType, ttsSettings }) {
+    async rptsSend(call, { command, reporting, repromptsList, noReplyDelay, speechSections, textType, ttsSettings, sensitiveData }) {
         const index = this.rptsIndex;
         const current = repromptsList[index];
         const params = command.params;
@@ -343,7 +372,10 @@ class VoiceStep extends step_1.default {
             lodash_1.default.assign(reportingObject, reporting);
         }
         await this.transcript(call, reportingObject);
-
+        if (call.vv >= 2)
+            await this.pauseRecording(call, command, sensitiveData);
+        else
+            await this.sendCommands(call, [command]);
     }
     rptsTimeout({ noReplyDelay, repromptsList }) {
         const current = repromptsList[this.rptsIndex];
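Taken together, the voice.js changes move the thread-exit logic that Global Command.js previously inlined into the shared exitStepByThread helper, and they gate session-recording commands on the call's voicer protocol version (call.vv). Below is a standalone sketch of the recording decision only; Call, Command, SensitiveData and send are simplified stand-ins, not the package's real classes or API.

// Simplified illustration of the vv-gated logic in pauseRecording/resumeRecording above.
interface SensitiveData { muteStep: boolean; muteUser: boolean; muteBot: boolean; }
interface Call { vv: number; recordCall: boolean; }
interface Command { name: string; params: Record<string, unknown>; }
type Send = (commands: Command[]) => Promise<void>;

async function pauseRecording(call: Call, command: Command, sensitiveData: SensitiveData | undefined, send: Send) {
    const commands = [command];
    if (call.recordCall && sensitiveData?.muteStep) {
        if (call.vv >= 2) {
            // vv >= 2: the voicer stops/resumes session recording itself,
            // so the sensitive-data flags ride along on the command
            command.params.sensitiveData = sensitiveData;
        }
        else {
            // older voicers still need an explicit stop-record-session sent first
            commands.unshift({
                name: 'stop-record-session',
                params: { muteUser: sensitiveData.muteUser, muteBot: sensitiveData.muteBot }
            });
        }
    }
    await send(commands);
}

async function resumeRecording(call: Call, sensitiveData: SensitiveData | undefined, send: Send) {
    // resume is only needed for older voicers that were sent stop-record-session
    if (!call.recordCall || !sensitiveData?.muteStep || call.vv >= 2)
        return;
    await send([{
        name: 'resume-record-session',
        params: { muteUser: sensitiveData.muteUser, muteBot: sensitiveData.muteBot }
    }]);
}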