@onereach/step-voice 7.0.9-processttschunk.0 → 7.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,6 +19,11 @@ interface INPUT {
  enableSpoofCallerId?: boolean;
  spoofCallerId?: string;
  isAMD?: boolean;
+ enableWhisperTransfer?: boolean;
+ whisperAnnounceAudio?: TODO[];
+ textType?: string;
+ whisperCustomerConversation?: string;
+ whisperCustomerConversationThread?: string;
  }
  export default class InitiateCall extends VoiceStep<INPUT, TODO, CallStartEvent> {
  get conversation(): string | import("@onereach/flow-sdk/types").IMergeField;
@@ -47,7 +47,7 @@ class InitiateCall extends voice_1.default {
  this.exitStep('cancel');
  }
  async waitForCall() {
- const { asr, tts, from: botNumber, endUserNumber, sipHost, sipUser, sipPassword, timeout, headers, enableSpoofCallerId, spoofCallerId, isAMD, otherCallRef, otherCallRefThread, handleCancel } = this.data;
+ const { asr, tts, from: botNumber, endUserNumber, sipHost, sipUser, sipPassword, timeout, headers, enableSpoofCallerId, spoofCallerId, isAMD, otherCallRef, otherCallRefThread, handleCancel, enableWhisperTransfer, whisperAnnounceAudio, textType, whisperCustomerConversation, } = this.data;
  const call = await this.fetchData();
  this.triggers.once(`in/voice/${call.id}/event`, async (event) => {
  switch (event.params.type) {
@@ -84,7 +84,7 @@ class InitiateCall extends voice_1.default {
  reportingSettingsKey: 'transcript',
  actionFromBot: true
  });
- await this.sendCommands(newCall, [
+ const commands = [
  ...isAMD
  ? [{
  name: 'start-avmd',
@@ -100,8 +100,39 @@ class InitiateCall extends voice_1.default {
  type: asr.serverSettings.engine
  }
  }]
- : []
- ]);
+ : [],
+ ];
+ if (enableWhisperTransfer === true) {
+ const ttsSettings = tts.getSettings(call.tts);
+ const announceSpeechSections = whisperAnnounceAudio?.map(section => ({
+ text: section.voiceTextMsg,
+ url: section.audioUrl,
+ bargeInVoice: false,
+ bargeInKeypad: false,
+ textType,
+ provider: ttsSettings.provider,
+ ...ttsSettings
+ }));
+ const announceMessageCommand = {
+ name: 'speak',
+ params: {
+ sections: announceSpeechSections,
+ reporterTranscriptEventId: '',
+ }
+ };
+ const customerCall = await this.getConversationByName(whisperCustomerConversation ?? '');
+ const transferCommand = {
+ name: 'defer_replaces',
+ params: {
+ callerId: customerCall['id']
+ }
+ };
+ commands.push(...[
+ announceMessageCommand,
+ transferCommand,
+ ]);
+ }
+ await this.sendCommands(newCall, commands);
  return this.exitStep('success');
  }
  case 'hangup': {
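
For orientation: when enableWhisperTransfer is true, InitiateCall now appends two extra voicer commands before sending them with sendCommands. The sketch below is a hand-written TypeScript illustration rather than the package's code; the WhisperSection and TtsSettings shapes are assumptions inferred from the mapping in the hunk above.

// Illustration only (not the package source): assembling the two whisper-transfer
// commands added by this release. Field names mirror the hunk above; WhisperSection
// and TtsSettings are assumed shapes.
interface WhisperSection { voiceTextMsg?: string; audioUrl?: string; }
interface TtsSettings { provider?: string; [key: string]: unknown; }

function buildWhisperTransferCommands(
    whisperAnnounceAudio: WhisperSection[] | undefined,
    textType: string | undefined,
    ttsSettings: TtsSettings,
    customerCallId: string,
) {
    // 1. A 'speak' command that plays the announcement to the transfer target, barge-in disabled.
    const announceMessageCommand = {
        name: 'speak',
        params: {
            sections: whisperAnnounceAudio?.map(section => ({
                text: section.voiceTextMsg,
                url: section.audioUrl,
                bargeInVoice: false,
                bargeInKeypad: false,
                textType,
                provider: ttsSettings.provider,
                ...ttsSettings,
            })),
            reporterTranscriptEventId: '',
        },
    };
    // 2. A 'defer_replaces' command pointing at the customer call resolved by
    //    getConversationByName(whisperCustomerConversation) (see the step.js hunk below).
    const transferCommand = {
        name: 'defer_replaces',
        params: { callerId: customerCallId },
    };
    return [announceMessageCommand, transferCommand];
}
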
@@ -2,6 +2,7 @@ import VoiceStep, { TODO, VoiceEvent } from './voice';
  interface INPUT {
  conferenceName: string;
  stayInConference: boolean;
+ mute: boolean;
  volumeIn?: number;
  volumeOut?: number;
  }
@@ -6,7 +6,7 @@ const voice_1 = tslib_1.__importDefault(require("./voice"));
  class JoinConference extends voice_1.default {
  async runStep() {
  const call = await this.fetchData();
- const { conferenceName, stayInConference, volumeIn, volumeOut } = this.data;
+ const { conferenceName, stayInConference, volumeIn, volumeOut, mute } = this.data;
  this.triggers.local(`in/voice/${call.id}`, async (event) => {
  switch (event.params.type) {
  case 'hangup':
@@ -49,7 +49,10 @@ class JoinConference extends voice_1.default {
  name: 'conference.start',
  params: {
  room: conferenceName,
- flags: [call.vv >= 3 ? 'mandatory_member_endconf' : 'endconf'],
+ flags: [
+ call.vv >= 3 ? 'mandatory_member_endconf' : 'endconf',
+ ...mute ? ['mute'] : []
+ ],
  stayInConference,
  volumeIn,
  volumeOut
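
The new mute input only affects how the conference.start flags are built. A minimal sketch of that logic (illustrative function name; the expression is copied from the hunk above, with callVv standing in for call.vv, the voicer version):

// Illustration of the flag assembly above.
function conferenceFlags(callVv: number, mute: boolean): string[] {
    return [
        callVv >= 3 ? 'mandatory_member_endconf' : 'endconf',
        ...(mute ? ['mute'] : []),
    ];
}
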
@@ -46,7 +46,8 @@ class SayMessage extends voice_1.default {
  name: 'speak',
  params: {
  sections: speechSections,
- reporterTranscriptEventId: ''
+ reporterTranscriptEventId: '',
+ useWhisperFeature: true
  }
  };
  const eventId = await this.transcript(call, {
package/dst/step.d.ts CHANGED
@@ -25,6 +25,7 @@ export default class ConvStep<TData extends IConversationData, TIn = unknown, TO
  get useQueue(): boolean;
  fetchData(): Promise<TData>;
  getConversation(): Promise<IConversation>;
+ getConversationByName(conversation: string): Promise<IConversation>;
  updateData(): Promise<void>;
  hasConversation(): Promise<boolean>;
  runBefore(): Promise<void>;
package/dst/step.js CHANGED
@@ -51,6 +51,10 @@ class ConvStep extends step_1.default {
  async getConversation() {
  return (await this.fetchData())._conv;
  }
+ async getConversationByName(conversation) {
+ const convDataThread = this.process.getSafeThread(this.dataThreadId);
+ return await convDataThread.get(conversation);
+ }
  async updateData() {
  if (this.convDataCache == null)
  throw new Error(`missing conversation cache in state ${this.state.name}`);
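
The new ConvStep.getConversationByName looks a conversation up on the step's data thread by name; the whisper transfer in InitiateCall uses it to resolve whisperCustomerConversation into a call whose id feeds the defer_replaces command. A rough standalone sketch, where the thread interfaces are assumptions and the two-line body mirrors the added code:

// Sketch only: the lookup reads a conversation record from the data thread by its name/key.
interface IConversation { id: string; }
interface DataThread { get(name: string): Promise<IConversation>; }

async function getConversationByName(
    getSafeThread: (threadId: string) => DataThread,
    dataThreadId: string,
    conversation: string,
): Promise<IConversation> {
    const convDataThread = getSafeThread(dataThreadId); // same call chain as the added method
    return convDataThread.get(conversation);
}
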
package/dst/voice.d.ts CHANGED
@@ -1,9 +1,9 @@
  import { ICallback } from '@onereach/flow-sdk/dst/types/eventManager';
- import { IPromtpSection as IPromptSection, IVoiceReporterTranscriptEventArgs } from '@onereach/flow-sdk/dst/types/reporter';
+ import { IPromtpSection, IVoiceReporterTranscriptEventArgs } from '@onereach/flow-sdk/dst/types/reporter';
  import { IEvent } from '@onereach/flow-sdk/dst/types/event';
  import ConvStep, { IConversationData } from './step';
  import BasicError from '@onereach/flow-sdk/dst/errors/base';
- import { WITH_VAR } from '@onereach/flow-sdk/dst/types/thread';
+ import { WITH_VAR } from "@onereach/flow-sdk/types/thread";
  export type TODO = any;
  export interface SensitiveData {
  muteStep: boolean;
@@ -33,7 +33,7 @@ export interface IVoiceCall extends IConversationData {
  user: string;
  };
  }
- export type EventType = 'hangup' | 'ack' | 'error' | 'cancel' | 'background' | 'avm-detected' | 'recognition' | 'digit' | 'digits' | 'conference-start' | 'conference-end' | 'playback' | 'timeout' | 'record' | 'bridge' | 'bridge/ended' | 'is_flow_ready' | 'call' | 'dtmf-sent' | 'tts-chunks-playback-done';
+ export type EventType = 'hangup' | 'ack' | 'error' | 'cancel' | 'background' | 'avm-detected' | 'recognition' | 'digit' | 'digits' | 'conference-start' | 'conference-end' | 'playback' | 'timeout' | 'record' | 'bridge' | 'bridge/ended' | 'is_flow_ready' | 'call' | 'dtmf-sent';
  export declare class VoiceStepError extends BasicError {
  }
  export type VoicerError = Error & {
@@ -69,11 +69,11 @@ export interface CallStartEvent extends VoiceEvent {
  export interface HandleInterruptionParams<TParams extends VoiceEvent = VoiceEvent> {
  call: IVoiceCall;
  event: IEvent<TParams>;
- speechSections: IPromptSection[];
- repromptsList?: Array<{
+ speechSections: IPromtpSection[];
+ repromptsList?: {
  message?: string;
  fileName?: string;
- }>;
+ }[];
  reportingSettingsKey?: string;
  }
  export type VoiceEvents<TParams> = IEvent<TParams, WITH_VAR<'in/voice/'>> | IEvent<TParams, WITH_VAR<'in/voice/', '/event'>>;
@@ -87,9 +87,9 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
  sendCommands({ id, type, callback }: IVoiceCall, commands: TODO[]): Promise<void>;
  handleHeartbeat(call: IVoiceCall): Promise<void>;
  handleCancel(): void;
- extractSectionMessages(sections: IPromptSection[], interruptionMetadata?: VoiceInterruptionMetadata): string;
+ extractSectionMessages(sections: IPromtpSection[], interruptionMetadata?: VoiceInterruptionMetadata): string;
  extractPlayedSectionMessage(text: string, sectionDuration: number, playedTime: number): string;
- extractSectionFiles(sections: IPromptSection[]): Array<{
+ extractSectionFiles(sections: IPromtpSection[]): Array<{
  fileUrl: string;
  fileType: string;
  }>;
@@ -109,8 +109,6 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
  rptsHasMore({ repromptsList }: TODO): boolean;
  get rptsIndex(): number;
  get rptsStarted(): boolean;
- canVoicerHeartbeat(call: IVoiceCall): boolean;
- /** @deprecated use `this.canVoicerHeartbeat` instead */
  canVoicerHearbeat(call: IVoiceCall): boolean;
  canVoicerAck(call: IVoiceCall): boolean;
  getInterruptionMetadata(event: IEvent<TParams>): VoiceInterruptionMetadata | null;
package/dst/voice.js CHANGED
@@ -63,7 +63,7 @@ class VoiceStep extends step_1.default {
  step: { key: this.session.key, trd: this.isGlobal ? this.workerThreadId : this.thread.id } // response should be sent to this session
  },
  reporting: {
- ...this.session.getSessionRef()
+ ...this.session.getSessionRef(),
  }
  };
  const result = await this.thread.eventManager.emit(event, {
@@ -75,14 +75,14 @@ class VoiceStep extends step_1.default {
  throw new Error(`failed to send command to call: ${id}`);
  }
  async handleHeartbeat(call) {
- const allowHeartbeat = this.canVoicerHeartbeat(call);
+ const allowHeartbeat = this.canVoicerHearbeat(call);
  if (allowHeartbeat) {
  if (this.thread.background) {
  delete this.waits.timeout;
  return;
  }
- const expectHeartbeatBefore = Date.now() + 290000;
- this.triggers.deadline(expectHeartbeatBefore, () => {
+ const expectHearbeatBefore = Date.now() + 290000;
+ this.triggers.deadline(expectHearbeatBefore, () => {
  this.thread.background = true;
  if (call.ended) {
  this.log.warn('missing heartbeat, call is ended');
@@ -149,7 +149,7 @@ class VoiceStep extends step_1.default {
  const commands = [command];
  const stopRecording = (call.recordCall && sensitiveData?.muteStep);
  if (call.vv >= 2) {
- // newer voicer version automatically should stop/resume session recording
+ // newer voicer version automaically should stop/resume session recording
  command.params.sensitiveData = stopRecording ? sensitiveData : {};
  }
  else if (stopRecording) {
@@ -232,14 +232,14 @@ class VoiceStep extends step_1.default {
  EventValue: {
  eventId: interruptionMetadata.reporterTranscriptEventId,
  eventValue: {
- Message: reportingObject.message
- }
- }
+ Message: reportingObject.message,
+ },
+ },
  };
- this.log.debug('Augment Transcript', {
+ this.log.debug(`Augment Transcript`, {
  interruptionMetadata,
  updateReportingObject,
- shouldBeSendToHitl: this.process.cache.hitl != null
+ shouldBeSendToHitl: this.process.cache.hitl != null,
  });
  if (this.process.cache.hitl) {
  await this.process.cache.hitl.queueEvents([updateReportingObject]);
@@ -401,20 +401,16 @@ class VoiceStep extends step_1.default {
  get rptsStarted() {
  return this.rptsIndex !== 0;
  }
- canVoicerHeartbeat(call) {
- return call.vv >= 1;
- }
- /** @deprecated use `this.canVoicerHeartbeat` instead */
  canVoicerHearbeat(call) {
- return this.canVoicerHeartbeat(call);
+ return call.vv >= 1;
  }
  canVoicerAck(call) {
  return call.vv >= 2;
  }
  getInterruptionMetadata(event) {
- return event.params?.interruptionMetadata ??
- event.params?.result?.interruptionMetadata ??
- null;
+ return event.params?.interruptionMetadata
+ ?? event.params?.result?.interruptionMetadata
+ ?? null;
  }
  async handleInterruption(params) {
  const { call, event, speechSections, repromptsList = [], reportingSettingsKey = 'transcript' } = params;
@@ -427,7 +423,7 @@ class VoiceStep extends step_1.default {
  const current = repromptsList[this.rptsIndex - 1];
  sections.push({
  url: current?.fileName,
- text: current?.message
+ text: current?.message,
  });
  }
  else {
@@ -437,7 +433,7 @@ class VoiceStep extends step_1.default {
  action: 'Call Prompt',
  actionFromBot: true,
  sections,
- reportingSettingsKey
+ reportingSettingsKey,
  }, interruptionMetadata);
  }
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@onereach/step-voice",
- "version": "7.0.9-processttschunk.0",
+ "version": "7.0.10",
  "author": "Roman Zolotarov <roman.zolotarov@onereach.com>",
  "contributors": [
  "Roman Zolotarov",
@@ -1,13 +0,0 @@
- import VoiceStep, { VoiceEvent } from './voice';
- interface INPUT {
- textChunk: string;
- isFinal: boolean;
- flush?: boolean;
- }
- interface EVENT extends VoiceEvent {
- exitId?: string;
- }
- export default class ProcessTtsChunk extends VoiceStep<INPUT, {}, EVENT> {
- runStep(): Promise<void>;
- }
- export {};
@@ -1,27 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- const tslib_1 = require("tslib");
- const voice_1 = tslib_1.__importDefault(require("./voice"));
- class ProcessTtsChunk extends voice_1.default {
- async runStep() {
- const call = await this.fetchData();
- const { textChunk, isFinal, flush } = this.data;
- const command = {
- name: 'process-tts-chunk',
- params: {
- chunk: textChunk,
- isFinal,
- flush,
- }
- };
- this.log.info('ProcessTtsChunk command', { command });
- this.triggers.local(`in/voice/${call.id}`, async (event) => {
- this.log.info('Received event from voicer', event);
- });
- this.triggers.otherwise(async () => {
- await this.sendCommands({ ...call, type: 'tts-chunk' }, [command]);
- this.exitStep('next');
- });
- }
- }
- exports.default = ProcessTtsChunk;
@@ -1,18 +0,0 @@
- import VoiceStep, { TODO, VoiceEvent } from './voice';
- interface INPUT {
- voiceId: string;
- infiniteProcessing: boolean;
- asr: TODO;
- endpointing: number;
- voiceSettings: any;
- queryParams: any;
- minConfidence: number;
- confirmationConfidence?: number;
- }
- interface EVENT extends VoiceEvent {
- phrases?: TODO[];
- }
- export default class StartTTSChunksProcessing extends VoiceStep<INPUT, {}, EVENT> {
- runStep(): Promise<void>;
- }
- export {};
@@ -1,68 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- const tslib_1 = require("tslib");
- const voice_1 = tslib_1.__importDefault(require("./voice"));
- class StartTTSChunksProcessing extends voice_1.default {
- async runStep() {
- const call = await this.fetchData();
- const { voiceId, infiniteProcessing, asr, endpointing, minConfidence, confirmationConfidence, voiceSettings: getSettings, queryParams: getParams } = this.data;
- const grammar = {
- id: this.currentStepId,
- asr: {
- ...asr.getSettings(call.asr),
- config: {
- endpointing,
- }
- },
- };
- const voiceSettings = await getSettings.call(this);
- const queryParams = await getParams.call(this);
- const command = {
- name: 'start-TTS-chunks-processing',
- params: {
- voiceId,
- infiniteProcessing,
- grammar,
- voiceSettings,
- queryParams,
- minConfidence,
- confirmationConfidence
- }
- };
- this.log.debug('StartTTSChunksProcessing command', { command });
- this.triggers.local(`in/voice/${call.id}`, async (event) => {
- this.log.info('Received event from voicer', event);
- switch (event.params.type) {
- case 'tts-chunks-playback-done': {
- this.log.info('tts-chunks-playback-done', event.params);
- return this.exitStep('next', {}, false);
- }
- case 'timeout': {
- this.log.info('timeout', event.params);
- return this.exitStep(event.params.type, {}, false);
- }
- case 'recognition': {
- this.log.info('recognition', event.params);
- const params = event.params;
- const exitData = this.exitChoiceData('voice', params);
- this.log.info('exitData', exitData);
- return this.exitStep(event.params.type, exitData, false);
- }
- case 'hangup': {
- await this.handleHangup(call);
- return await this.waitConvEnd();
- }
- case 'cancel': {
- return this.handleCancel();
- }
- case 'error':
- return this.throwError(event.params.error);
- }
- });
- this.triggers.otherwise(async () => {
- await this.pauseRecording(call, command);
- return this.exitFlow();
- });
- }
- }
- exports.default = StartTTSChunksProcessing;