@onereach/step-voice 6.1.22-VOIC1406.7 → 6.1.26-VOIC1449.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dst/Choice.js +7 -14
- package/dst/Custom Voice Input.js +9 -1
- package/dst/Keypad Input.js +9 -1
- package/dst/Say Message.js +9 -11
- package/dst/Voice Recording.d.ts +1 -0
- package/dst/Voice Recording.js +17 -8
- package/dst/Wait For Call.js +1 -0
- package/dst/voice.d.ts +17 -2
- package/dst/voice.js +60 -20
- package/package.json +5 -4
package/dst/Choice.js
CHANGED
|
@@ -97,19 +97,13 @@ class Choice extends voice_1.default {
|
|
|
97
97
|
// There's a specific need to do so. There might be ${variable} section
|
|
98
98
|
this.triggers.local(`in/voice/${call.id}`, async (event) => {
|
|
99
99
|
const reportingSettingsKey = this.rptsStarted ? 'transcriptRepromptResponse' : 'transcriptResponse';
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
},
|
|
108
|
-
reportingSettingsKey: 'transcriptPrompt',
|
|
109
|
-
action: 'Call Prompt',
|
|
110
|
-
actionFromBot: true
|
|
111
|
-
}, interruptionMetadata);
|
|
112
|
-
}
|
|
100
|
+
await this.handleInterruption({
|
|
101
|
+
call,
|
|
102
|
+
event,
|
|
103
|
+
speechSections,
|
|
104
|
+
repromptsList,
|
|
105
|
+
reportingSettingsKey
|
|
106
|
+
});
|
|
113
107
|
switch (event.params.type) {
|
|
114
108
|
case 'digit':
|
|
115
109
|
case 'digits': {
|
|
@@ -279,4 +273,3 @@ class Choice extends voice_1.default {
|
|
|
279
273
|
}
|
|
280
274
|
}
|
|
281
275
|
exports.default = Choice;
|
|
282
|
-
// --------------------------------------------------------------------
|
|
@@ -88,6 +88,13 @@ class CustomVoiceInput extends voice_1.default {
|
|
|
88
88
|
// There's a specific need to do so. There might be ${variable} section
|
|
89
89
|
this.triggers.local(`in/voice/${call.id}`, async (event) => {
|
|
90
90
|
const reportingSettingsKey = this.rptsStarted ? 'transcriptRepromptResponse' : 'transcriptResponse';
|
|
91
|
+
await this.handleInterruption({
|
|
92
|
+
call,
|
|
93
|
+
event,
|
|
94
|
+
speechSections,
|
|
95
|
+
repromptsList,
|
|
96
|
+
reportingSettingsKey
|
|
97
|
+
});
|
|
91
98
|
switch (event.params.type) {
|
|
92
99
|
// digit recognition removed
|
|
93
100
|
case 'recognition': {
|
|
@@ -191,7 +198,7 @@ class CustomVoiceInput extends voice_1.default {
|
|
|
191
198
|
}
|
|
192
199
|
});
|
|
193
200
|
this.triggers.otherwise(async () => {
|
|
194
|
-
await this.transcript(call, {
|
|
201
|
+
const eventId = await this.transcript(call, {
|
|
195
202
|
sections: speechSections,
|
|
196
203
|
reprompt: {
|
|
197
204
|
maxAttempts: repromptsList.length,
|
|
@@ -201,6 +208,7 @@ class CustomVoiceInput extends voice_1.default {
|
|
|
201
208
|
action: 'Call Prompt',
|
|
202
209
|
actionFromBot: true
|
|
203
210
|
});
|
|
211
|
+
command.params.reporterTranscriptEventId = eventId;
|
|
204
212
|
command.params.sections = speechSections;
|
|
205
213
|
command.params.timeout = this.rptsTimeout({ noReplyDelay, repromptsList, initial: true });
|
|
206
214
|
await this.pauseRecording(call, command, sensitiveData);
|
package/dst/Keypad Input.js
CHANGED
|
@@ -70,6 +70,13 @@ class KeypadInput extends voice_1.default {
|
|
|
70
70
|
const speechSections = this.buildSections({ sections: this.data.audio, textType, ttsSettings, allowKeypadBargeIn: keypadBargeIn });
|
|
71
71
|
this.triggers.local(`in/voice/${call.id}`, async (event) => {
|
|
72
72
|
const reportingSettingsKey = this.rptsStarted ? 'transcriptRepromptResponse' : 'transcriptResponse';
|
|
73
|
+
await this.handleInterruption({
|
|
74
|
+
call,
|
|
75
|
+
event,
|
|
76
|
+
speechSections,
|
|
77
|
+
repromptsList,
|
|
78
|
+
reportingSettingsKey
|
|
79
|
+
});
|
|
73
80
|
switch (event.params.type) {
|
|
74
81
|
case 'digits': {
|
|
75
82
|
const digits = event.params.digits;
|
|
@@ -162,7 +169,7 @@ class KeypadInput extends voice_1.default {
|
|
|
162
169
|
}
|
|
163
170
|
});
|
|
164
171
|
this.triggers.otherwise(async () => {
|
|
165
|
-
await this.transcript(call, {
|
|
172
|
+
const eventId = await this.transcript(call, {
|
|
166
173
|
sections: speechSections,
|
|
167
174
|
reprompt: {
|
|
168
175
|
maxAttempts: repromptsList.length,
|
|
@@ -172,6 +179,7 @@ class KeypadInput extends voice_1.default {
|
|
|
172
179
|
action: 'Call Prompt',
|
|
173
180
|
actionFromBot: true
|
|
174
181
|
});
|
|
182
|
+
command.params.reporterTranscriptEventId = eventId;
|
|
175
183
|
command.params.firstDigitTimeout = this.rptsTimeout({ noReplyDelay, repromptsList, initial: true });
|
|
176
184
|
command.params.sections = speechSections;
|
|
177
185
|
await this.pauseRecording(call, command, sensitiveData);
|
package/dst/Say Message.js
CHANGED
|
@@ -19,15 +19,11 @@ class SayMessage extends voice_1.default {
|
|
|
19
19
|
...ttsSettings
|
|
20
20
|
}));
|
|
21
21
|
this.triggers.local(`in/voice/${call.id}`, async (event) => {
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
reportingSettingsKey: 'transcript',
|
|
28
|
-
actionFromBot: true
|
|
29
|
-
}, interruptionMetadata);
|
|
30
|
-
}
|
|
22
|
+
await this.handleInterruption({
|
|
23
|
+
call,
|
|
24
|
+
event,
|
|
25
|
+
speechSections
|
|
26
|
+
});
|
|
31
27
|
switch (event.params.type) {
|
|
32
28
|
case 'hangup':
|
|
33
29
|
await this.handleHangup(call);
|
|
@@ -48,15 +44,17 @@ class SayMessage extends voice_1.default {
|
|
|
48
44
|
const command = {
|
|
49
45
|
name: 'speak',
|
|
50
46
|
params: {
|
|
51
|
-
sections: speechSections
|
|
47
|
+
sections: speechSections,
|
|
48
|
+
reporterTranscriptEventId: ''
|
|
52
49
|
}
|
|
53
50
|
};
|
|
54
|
-
await this.transcript(call, {
|
|
51
|
+
const eventId = await this.transcript(call, {
|
|
55
52
|
action: 'Call Prompt',
|
|
56
53
|
sections: command.params.sections,
|
|
57
54
|
reportingSettingsKey: 'transcript',
|
|
58
55
|
actionFromBot: true
|
|
59
56
|
});
|
|
57
|
+
command.params.reporterTranscriptEventId = eventId;
|
|
60
58
|
await this.pauseRecording(call, command, sensitiveData);
|
|
61
59
|
return this.exitFlow();
|
|
62
60
|
});
|
package/dst/Voice Recording.d.ts
CHANGED
package/dst/Voice Recording.js
CHANGED
|
@@ -9,12 +9,13 @@ class Recording extends voice_1.default {
|
|
|
9
9
|
const { tts, sensitiveData, textType } = this.data;
|
|
10
10
|
const ttsSettings = tts.getSettings(call.tts);
|
|
11
11
|
const sections = this.buildSections({ sections: this.data.audio, textType, ttsSettings });
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
12
|
+
if (!this.data?.disableDefaultBeepSound) {
|
|
13
|
+
sections.push({
|
|
14
|
+
url: 'https://bot-service-recordings.s3.us-west-2.amazonaws.com/beep_sound.wav',
|
|
15
|
+
bargeInVoice: false,
|
|
16
|
+
bargeInKeypad: false
|
|
17
|
+
});
|
|
18
|
+
}
|
|
18
19
|
const command = {
|
|
19
20
|
name: 'start-record',
|
|
20
21
|
params: {
|
|
@@ -24,11 +25,18 @@ class Recording extends voice_1.default {
|
|
|
24
25
|
terminator: this.data.terminationKey
|
|
25
26
|
}),
|
|
26
27
|
duration: parseInt(this.data.recordDurationSeconds),
|
|
27
|
-
sections
|
|
28
|
+
sections,
|
|
29
|
+
reporterTranscriptEventId: ''
|
|
28
30
|
}
|
|
29
31
|
};
|
|
30
32
|
this.triggers.local(`in/voice/${call.id}`, async (event) => {
|
|
31
33
|
const url = event.params.mediaPath;
|
|
34
|
+
await this.handleInterruption({
|
|
35
|
+
call,
|
|
36
|
+
event,
|
|
37
|
+
speechSections: sections,
|
|
38
|
+
reportingSettingsKey: 'transcriptPrompt'
|
|
39
|
+
});
|
|
32
40
|
switch (event.params.type) {
|
|
33
41
|
case 'hangup':
|
|
34
42
|
// TODO do we need zombie?
|
|
@@ -65,12 +73,13 @@ class Recording extends voice_1.default {
|
|
|
65
73
|
this.triggers.otherwise(async () => {
|
|
66
74
|
// TODO figure out Global Command compatibility
|
|
67
75
|
lodash_1.default.set(this.state.hooks, 'skipVoiceEventTypes', ['hangup']);
|
|
68
|
-
await this.transcript(call, {
|
|
76
|
+
const eventId = await this.transcript(call, {
|
|
69
77
|
sections,
|
|
70
78
|
reportingSettingsKey: 'transcriptPrompt',
|
|
71
79
|
action: 'Call Prompt',
|
|
72
80
|
actionFromBot: true
|
|
73
81
|
});
|
|
82
|
+
command.params.reporterTranscriptEventId = eventId;
|
|
74
83
|
await this.pauseRecording(call, command, sensitiveData);
|
|
75
84
|
return this.exitFlow();
|
|
76
85
|
});
|
package/dst/Wait For Call.js
CHANGED
package/dst/voice.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { ICallback, IVoiceReporterTranscriptEventArgs, IPromtpSection } from '@onereach/flow-sdk/dst/types';
|
|
1
|
+
import { ICallback, IVoiceReporterTranscriptEventArgs, IPromtpSection, ITypedEvent } from '@onereach/flow-sdk/dst/types';
|
|
2
2
|
import ConvStep, { IConversationData } from './step';
|
|
3
3
|
import BasicError from '@onereach/flow-sdk/dst/errors/base';
|
|
4
4
|
export type TODO = any;
|
|
@@ -54,12 +54,25 @@ export interface VoiceEvent {
|
|
|
54
54
|
type: EventType;
|
|
55
55
|
global?: boolean;
|
|
56
56
|
error?: VoicerError;
|
|
57
|
-
interruptionMetadata
|
|
57
|
+
interruptionMetadata?: VoiceInterruptionMetadata;
|
|
58
|
+
result?: {
|
|
59
|
+
interruptionMetadata?: VoiceInterruptionMetadata;
|
|
60
|
+
};
|
|
58
61
|
}
|
|
59
62
|
export interface CallStartEvent extends VoiceEvent {
|
|
60
63
|
channel: IVoiceCall;
|
|
61
64
|
headers?: Record<string, string>;
|
|
62
65
|
}
|
|
66
|
+
export interface HandleInterruptionParams<TParams extends VoiceEvent = VoiceEvent> {
|
|
67
|
+
call: IVoiceCall;
|
|
68
|
+
event: ITypedEvent<TParams>;
|
|
69
|
+
speechSections: IPromtpSection[];
|
|
70
|
+
repromptsList?: {
|
|
71
|
+
message?: string;
|
|
72
|
+
fileName?: string;
|
|
73
|
+
}[];
|
|
74
|
+
reportingSettingsKey?: string;
|
|
75
|
+
}
|
|
63
76
|
export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends VoiceEvent = VoiceEvent> extends ConvStep<IVoiceCall, TIn & {
|
|
64
77
|
handleCancel?: boolean;
|
|
65
78
|
}, TOut, TParams> {
|
|
@@ -94,4 +107,6 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
|
|
|
94
107
|
get rptsStarted(): boolean;
|
|
95
108
|
canVoicerHearbeat(call: IVoiceCall): boolean;
|
|
96
109
|
canVoicerAck(call: IVoiceCall): boolean;
|
|
110
|
+
getInterruptionMetadata(event: ITypedEvent<TParams>): VoiceInterruptionMetadata | null;
|
|
111
|
+
handleInterruption(params: HandleInterruptionParams<TParams>): Promise<void>;
|
|
97
112
|
}
|
package/dst/voice.js
CHANGED
|
@@ -104,22 +104,26 @@ class VoiceStep extends step_1.default {
|
|
|
104
104
|
this.end();
|
|
105
105
|
}
|
|
106
106
|
extractSectionMessages(sections, interruptionMetadata) {
|
|
107
|
-
|
|
108
|
-
|
|
107
|
+
const { sectionIndex } = interruptionMetadata ?? {};
|
|
108
|
+
const slicedTo = sectionIndex != null ? sectionIndex + 1 : sections.length;
|
|
109
|
+
return sections
|
|
110
|
+
.slice(0, slicedTo)
|
|
109
111
|
.map((s, index) => {
|
|
110
112
|
// Should escape html, max length 4000 symbols
|
|
111
|
-
let text = s.text
|
|
113
|
+
let text = (s.text ?? '')
|
|
114
|
+
.slice(0, 4000)
|
|
115
|
+
.replace(/(<[^>]+>|<\/[^>]+>)/gi, ' ')
|
|
116
|
+
.trim();
|
|
112
117
|
// Extracts the portion of the section text that corresponds to the played time.
|
|
113
118
|
if (interruptionMetadata) {
|
|
114
|
-
const { playedTime, sectionDuration
|
|
119
|
+
const { playedTime, sectionDuration } = interruptionMetadata;
|
|
115
120
|
if (sectionIndex === index) {
|
|
116
121
|
text = this.extractPlayedSectionMessage(text, sectionDuration, playedTime);
|
|
117
122
|
}
|
|
118
123
|
}
|
|
119
124
|
return text;
|
|
120
125
|
})
|
|
121
|
-
.join(' ')
|
|
122
|
-
.value();
|
|
126
|
+
.join(' ');
|
|
123
127
|
}
|
|
124
128
|
extractPlayedSectionMessage(text, sectionDuration, playedTime) {
|
|
125
129
|
const words = text.split(' ');
|
|
@@ -128,7 +132,7 @@ class VoiceStep extends step_1.default {
|
|
|
128
132
|
const playedTimeSec = playedTime / 1000;
|
|
129
133
|
const wordsPerSecond = totalWords / sectionDurationSec;
|
|
130
134
|
const wordsPlayed = Math.floor(wordsPerSecond * playedTimeSec);
|
|
131
|
-
return words.slice(0, wordsPlayed).join(' ');
|
|
135
|
+
return words.slice(0, wordsPlayed).join(' ').trim();
|
|
132
136
|
}
|
|
133
137
|
extractSectionFiles(sections) {
|
|
134
138
|
return lodash_1.default.chain(sections)
|
|
@@ -217,19 +221,25 @@ class VoiceStep extends step_1.default {
|
|
|
217
221
|
* which was returned from the interrupted message,
|
|
218
222
|
* try to update the reporting message.
|
|
219
223
|
*/
|
|
220
|
-
if (interruptionMetadata?.reporterTranscriptEventId
|
|
224
|
+
if (interruptionMetadata?.reporterTranscriptEventId &&
|
|
225
|
+
reportingObject.message) {
|
|
226
|
+
const updateReportingObject = {
|
|
227
|
+
EventId: eventId,
|
|
228
|
+
Timestamp: new Date().toISOString(),
|
|
229
|
+
Event: 'Augment',
|
|
230
|
+
EventValue: {
|
|
231
|
+
eventId: interruptionMetadata.reporterTranscriptEventId,
|
|
232
|
+
eventValue: {
|
|
233
|
+
Message: reportingObject.message,
|
|
234
|
+
},
|
|
235
|
+
},
|
|
236
|
+
};
|
|
237
|
+
this.log.debug(`Augment Transcript`, {
|
|
238
|
+
interruptionMetadata,
|
|
239
|
+
updateReportingObject,
|
|
240
|
+
shouldBeSendToHitl: this.process.cache.hitl != null,
|
|
241
|
+
});
|
|
221
242
|
if (this.process.cache.hitl) {
|
|
222
|
-
const updateReportingObject = {
|
|
223
|
-
EventId: eventId,
|
|
224
|
-
Timestamp: new Date().toISOString(),
|
|
225
|
-
Event: 'Augment',
|
|
226
|
-
EventValue: {
|
|
227
|
-
eventId: interruptionMetadata.reporterTranscriptEventId,
|
|
228
|
-
eventValue: {
|
|
229
|
-
Message: reportingObject.message,
|
|
230
|
-
}
|
|
231
|
-
}
|
|
232
|
-
};
|
|
233
243
|
await this.process.cache.hitl.queueEvents([updateReportingObject]);
|
|
234
244
|
}
|
|
235
245
|
}
|
|
@@ -366,7 +376,8 @@ class VoiceStep extends step_1.default {
|
|
|
366
376
|
if (reporting) {
|
|
367
377
|
lodash_1.default.assign(reportingObject, reporting);
|
|
368
378
|
}
|
|
369
|
-
await this.transcript(call, reportingObject);
|
|
379
|
+
const eventId = await this.transcript(call, reportingObject);
|
|
380
|
+
params.reporterTranscriptEventId = eventId;
|
|
370
381
|
if (call.vv >= 2)
|
|
371
382
|
await this.pauseRecording(call, command, sensitiveData);
|
|
372
383
|
else
|
|
@@ -394,6 +405,35 @@ class VoiceStep extends step_1.default {
|
|
|
394
405
|
canVoicerAck(call) {
|
|
395
406
|
return call.vv >= 2;
|
|
396
407
|
}
|
|
408
|
+
getInterruptionMetadata(event) {
|
|
409
|
+
return event.params?.interruptionMetadata
|
|
410
|
+
?? event.params?.result?.interruptionMetadata
|
|
411
|
+
?? null;
|
|
412
|
+
}
|
|
413
|
+
async handleInterruption(params) {
|
|
414
|
+
const { call, event, speechSections, repromptsList = [], reportingSettingsKey = 'transcript' } = params;
|
|
415
|
+
const interruptionMetadata = this.getInterruptionMetadata(event);
|
|
416
|
+
this.log.debug('handleInterruption', { event, speechSections, repromptsList });
|
|
417
|
+
if (!interruptionMetadata)
|
|
418
|
+
return;
|
|
419
|
+
const sections = [];
|
|
420
|
+
if (repromptsList.length && reportingSettingsKey === 'transcriptRepromptResponse') {
|
|
421
|
+
const current = repromptsList[this.rptsIndex - 1];
|
|
422
|
+
sections.push({
|
|
423
|
+
url: current?.fileName,
|
|
424
|
+
text: current?.message,
|
|
425
|
+
});
|
|
426
|
+
}
|
|
427
|
+
else {
|
|
428
|
+
sections.push(...speechSections);
|
|
429
|
+
}
|
|
430
|
+
await this.transcript(call, {
|
|
431
|
+
action: 'Call Prompt',
|
|
432
|
+
actionFromBot: true,
|
|
433
|
+
sections,
|
|
434
|
+
reportingSettingsKey,
|
|
435
|
+
}, interruptionMetadata);
|
|
436
|
+
}
|
|
397
437
|
}
|
|
398
438
|
exports.default = VoiceStep;
|
|
399
439
|
module.exports = VoiceStep;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@onereach/step-voice",
|
|
3
|
-
"version": "6.1.22-VOIC1406.7",
|
|
3
|
+
"version": "6.1.26-VOIC1449.1",
|
|
4
4
|
"author": "Roman Zolotarov <roman.zolotarov@onereach.com>",
|
|
5
5
|
"contributors": [
|
|
6
6
|
"Roman Zolotarov",
|
|
@@ -9,8 +9,8 @@
|
|
|
9
9
|
"description": "Onereach.ai Voice Steps",
|
|
10
10
|
"main": "index.js",
|
|
11
11
|
"engines": {
|
|
12
|
-
"node": ">=
|
|
13
|
-
"npm": ">=
|
|
12
|
+
"node": ">= 20",
|
|
13
|
+
"npm": ">= 10"
|
|
14
14
|
},
|
|
15
15
|
"dependencies": {
|
|
16
16
|
"@onereach/step-conversation": "^1.0.40",
|
|
@@ -54,8 +54,9 @@
|
|
|
54
54
|
"download": "./scripts/download.sh",
|
|
55
55
|
"upload": "./scripts/upload.sh",
|
|
56
56
|
"pub": "./scripts/publish.sh",
|
|
57
|
-
"pub-
|
|
57
|
+
"pub-prerelease": "./scripts/publish-prerelease.sh",
|
|
58
58
|
"update-module-versions": "./scripts/update-module-versions.sh",
|
|
59
|
+
"patch-steps": "./scripts/update-steps-versions.sh patch",
|
|
59
60
|
"test": "jest --forceExit"
|
|
60
61
|
},
|
|
61
62
|
"repository": {
|