@onereach/step-voice 6.0.16 → 6.0.17
This diff shows the contents of publicly available package versions released to one of the supported registries, as they appear in those registries; it is provided for informational purposes only.
- package/dst/Choice.d.ts +1 -0
- package/dst/Choice.js +9 -9
- package/dst/Global Command.d.ts +0 -1
- package/dst/Global Command.js +3 -20
- package/dst/voice.d.ts +4 -1
- package/dst/voice.js +48 -19
- package/package.json +1 -1
package/dst/Choice.d.ts
CHANGED
package/dst/Choice.js
CHANGED
@@ -69,7 +69,7 @@ const isRepromptTrigger = (recogResult, promptsTriggers) => {
 class Choice extends voice_1.default {
     async runStep() {
         const call = await this.fetchData();
-        const { textType, asr, tts, sensitiveData, noReplyDelay, usePromptsTriggers, recognitionModel, useInterspeechTimeout, interSpeechTimeout } = this.data;
+        const { textType, asr, tts, sensitiveData, noReplyDelay, usePromptsTriggers, recognitionModel, useInterspeechTimeout, interSpeechTimeout, longRecognition } = this.data;
         const exitExists = (exitId) => {
             return lodash_1.default.some(choices, (choice) => choice.exitId === exitId);
         };
@@ -87,7 +87,9 @@ class Choice extends voice_1.default {
                 name: 'speak',
                 params: {
                     grammar,
-                    dictation:
+                    dictation: longRecognition
+                        ? 'continuous'
+                        : useInterspeechTimeout,
                     interSpeechTimeout: interSpeechTimeout * 1000,
                     sections: []
                 }
@@ -111,7 +113,7 @@ class Choice extends voice_1.default {
                     actionFromBot: false
                 });
                 await this.resumeRecording(call, sensitiveData);
-                return this.exitStep(exitId, this.exitChoiceData('dtmf', params));
+                return this.exitStep(exitId, this.exitChoiceData('dtmf', params), longRecognition);
             }
             else if (this.rptsHasMore({ repromptsList })) {
                 await this.transcript(call, {
@@ -139,9 +141,7 @@ class Choice extends voice_1.default {
                     actionFromBot: false
                 });
                 await this.resumeRecording(call, sensitiveData);
-                return this.exitStep('unrecognized', this.exitChoiceData('dtmf', {
-                    digit
-                }));
+                return this.exitStep('unrecognized', this.exitChoiceData('dtmf', { digit }), longRecognition);
             }
             case 'recognition': {
                 const params = event.params;
@@ -166,7 +166,7 @@ class Choice extends voice_1.default {
                 });
                 await this.resumeRecording(call, sensitiveData);
                 // There might be hooks after this step which we will try to avoid
-                return this.exitStep(exitId, this.exitChoiceData('voice', params));
+                return this.exitStep(exitId, this.exitChoiceData('voice', params), longRecognition);
             }
             else if (this.rptsHasMore({ repromptsList }) &&
                 (usePromptsTriggers ? isRepromptTrigger(phrases, this.data.promptsTriggers) : true)) {
@@ -197,7 +197,7 @@ class Choice extends voice_1.default {
                 });
                 await this.resumeRecording(call, sensitiveData);
                 // We might end up in same session
-                return this.exitStep('unrecognized', this.exitChoiceData('voice', params));
+                return this.exitStep('unrecognized', this.exitChoiceData('voice', params), longRecognition);
             }
             case 'timeout': {
                 if (this.rptsHasMore({ repromptsList })) {
@@ -225,7 +225,7 @@ class Choice extends voice_1.default {
                 });
                 await this.resumeRecording(call, sensitiveData);
                 // We might end up in same session
-                return this.exitStep('no reply', {});
+                return this.exitStep('no reply', {}, longRecognition);
             }
             case 'hangup': {
                 await this.handleHangup(call);
package/dst/Global Command.d.ts
CHANGED
@@ -37,7 +37,6 @@ export default class GlobalCommand extends VoiceStep<Partial<INPUT>, OUTPUT, EVE
     worker(): Promise<void>;
     hangup(call: IVoiceCall): Promise<unknown>;
     exitThread(event: ITypedEvent<EVENT>, type: string, stepExit: string): Promise<void>;
-    exitToThread(): void;
     buildGrammar(call: IVoiceCall, choices: TODO[]): Promise<any>;
     replyAck(eventParams: any): void;
 }
package/dst/Global Command.js
CHANGED
@@ -4,7 +4,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 const tslib_1 = require("tslib");
 const types_1 = require("@onereach/flow-sdk/dst/types");
 const lodash_1 = tslib_1.__importDefault(require("lodash"));
-const nanoid_1 = require("nanoid");
 const voice_1 = tslib_1.__importDefault(require("./voice"));
 class GlobalCommand extends voice_1.default {
     get isGlobal() {
@@ -119,6 +118,7 @@ class GlobalCommand extends voice_1.default {
                     reportingSettingsKey: 'transcript',
                     actionFromBot: false
                 });
+                await this.resumeRecording(call, { muteStep: true, muteUser: false, muteBot: false });
                 return await this.exitThread(event, 'digit', exitId);
             }
             else {
@@ -142,6 +142,7 @@ class GlobalCommand extends voice_1.default {
                     reportingSettingsKey: 'transcript',
                     actionFromBot: false
                 });
+                await this.resumeRecording(call, { muteStep: true, muteUser: false, muteBot: false });
                 return await this.exitThread(event, 'voice', exitId);
             }
             else {
@@ -212,7 +213,6 @@ class GlobalCommand extends voice_1.default {
         });
     }
     async exitThread(event, type, stepExit) {
-        this.log.debug('exitThread', type, stepExit);
         const params = event.params;
         const result = { type };
         if (!lodash_1.default.isEmpty(params.tags)) {
@@ -235,24 +235,7 @@ class GlobalCommand extends voice_1.default {
         if (!lodash_1.default.isEmpty(params.callRecording)) {
            result.callRecording = params.callRecording;
         }
-
-        await this.process.runThread({
-            id: `${exitLabel}_${(0, nanoid_1.nanoid)(8)}`,
-            state: {
-                name: this.exitToThread.name,
-                direct: true,
-                result: {
-                    conversation: this.conversation,
-                    conversationThreadId: this.dataThreadId,
-                    ...result
-                },
-                exitStep: stepExit,
-                step: this.currentStepId
-            }
-        });
-    }
-    exitToThread() {
-        this.thread.exitStep(this.state.exitStep, this.state.result);
+        await this.exitStepByThread(stepExit, result);
     }
     async buildGrammar(call, choices) {
         const { asr } = this.data;
package/dst/voice.d.ts
CHANGED
@@ -8,6 +8,7 @@ export interface SensitiveData {
     muteBot: boolean;
 }
 export interface IVoiceCall extends IConversationData {
+    /** voicer protocol version */
     vv: number;
     id: string;
     ended: boolean;
@@ -51,7 +52,9 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
     handleZombie?: boolean;
 }, TOut, TParams> {
     runBefore(): Promise<void>;
-    exitStep(
+    exitStep(exitId: string, data?: any, byThread?: boolean): any;
+    exitStepByThread(exitId: string, result: any): any;
+    exitToThread(): void;
     sendCommands({ id, type, callback }: IVoiceCall, commands: TODO[]): Promise<void>;
     handleDataThreadStart(): void;
     handleDataThreadEnd(): void;
package/dst/voice.js
CHANGED
@@ -4,6 +4,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.VoiceStepError = void 0;
 const tslib_1 = require("tslib");
 const lodash_1 = tslib_1.__importDefault(require("lodash"));
+const nanoid_1 = require("nanoid");
 const types_1 = require("@onereach/flow-sdk/dst/types");
 const uuid_1 = require("uuid");
 // TODO: !!!!! import ConvStep from '@onereach/step-conversation/dst/step' !!!!!
@@ -22,11 +23,34 @@ class VoiceStep extends step_1.default {
             await this.handleHeartbeat(this.cache);
         }
     }
-    exitStep(
+    exitStep(exitId, data, byThread = false) {
+        if (byThread)
+            return this.exitStepByThread(exitId, data);
         if (this.cache != null) {
             this.handleDataThreadEnd();
         }
-        return super.exitStep(
+        return super.exitStep(exitId, data);
+    }
+    exitStepByThread(exitId, result) {
+        this.log.debug('exitStepByThread', exitId, result);
+        const exitLabel = lodash_1.default.replace(this.getExitStepLabel(exitId) ?? exitId, /\W+/g, '');
+        return this.process.runThread({
+            id: `${exitLabel}_${(0, nanoid_1.nanoid)(8)}`,
+            state: {
+                name: this.exitToThread.name,
+                direct: true,
+                result: {
+                    conversation: this.conversation,
+                    conversationThreadId: this.dataThreadId,
+                    ...result
+                },
+                exitStep: exitId,
+                step: this.currentStepId
+            }
+        });
+    }
+    exitToThread() {
+        this.thread.exitStep(this.state.exitStep, this.state.result);
     }
     async sendCommands({ id, type, callback }, commands) {
         if (lodash_1.default.isEmpty(commands))
@@ -146,29 +170,34 @@ class VoiceStep extends step_1.default {
             .value();
     }
     async pauseRecording(call, command, sensitiveData) {
-
-            ...call.recordCall && sensitiveData?.muteStep
-                ? [{
-                    name: 'stop-record-session',
-                    params: {
-                        muteUser: sensitiveData.muteUser,
-                        muteBot: sensitiveData.muteBot
-                    }
-                }]
-                : [],
-            command
-        ]);
-    }
-    async resumeRecording(call, sensitiveData) {
+        const commands = [command];
         if (call.recordCall && sensitiveData?.muteStep) {
-
-
+            if (call.vv >= 2) {
+                // newer voicer version automaically should stop/resume session recording
+                command.params.sensitiveData = sensitiveData;
+            }
+            else {
+                commands.unshift({
+                    name: 'stop-record-session',
                     params: {
                         muteUser: sensitiveData.muteUser,
                        muteBot: sensitiveData.muteBot
                     }
-            }
+                });
+            }
         }
+        await this.sendCommands(call, commands);
+    }
+    async resumeRecording(call, sensitiveData) {
+        if (!call.recordCall || !sensitiveData?.muteStep || call.vv >= 2)
+            return;
+        await this.sendCommands(call, [{
+                name: 'resume-record-session',
+                params: {
+                    muteUser: sensitiveData.muteUser,
+                    muteBot: sensitiveData.muteBot
+                }
+            }]);
     }
     async transcript(call, data = {}) {
         const { previousTranscriptId = call.lastTranscriptId, action, keyPress, message, voiceProcessResult, reportingSettingsKey, sections, reprompt, recording, conferenceId, actionFromBot = false } = data;