@onereach/step-voice 7.0.9-VOIC1575.9 → 7.0.9-processttschunk.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -49,7 +49,6 @@ class InitiateCall extends voice_1.default {
  async waitForCall() {
  const { asr, tts, from: botNumber, endUserNumber, sipHost, sipUser, sipPassword, timeout, headers, enableSpoofCallerId, spoofCallerId, isAMD, otherCallRef, otherCallRefThread, handleCancel } = this.data;
  const call = await this.fetchData();
- const callALegId = headers?.find(h => h.name === 'X-Leg-A')?.value;
  this.triggers.once(`in/voice/${call.id}/event`, async (event) => {
  switch (event.params.type) {
  case 'is_flow_ready': {
@@ -101,46 +100,7 @@ class InitiateCall extends voice_1.default {
  type: asr.serverSettings.engine
  }
  }]
- : [],
- {
- name: 'whisper.hold',
- params: {
- id: callALegId
- }
- },
- {
- name: 'playback',
- params: {
- 'sections': [
- {
- 'text': '<speak>This is a live transfer from the automated system. Stand by for connection.</speak>',
- 'url': '',
- 'bargeInVoice': false,
- 'bargeInKeypad': false,
- 'textType': 'ssml',
- 'provider': 'polly',
- 'voiceId': 'Joanna',
- 'engine': 'standard',
- 'isStream': false,
- },
- ],
- 'useWhisperFeature': false,
- 'sensitiveData': {},
- resumeAfterGc: true,
- }
- },
- {
- name: 'whisper.hold_off',
- params: {
- id: callALegId
- }
- },
- {
- name: 'whisper.transfer',
- params: {
- id: callALegId
- }
- },
+ : []
  ]);
  return this.exitStep('success');
  }
@@ -218,8 +178,7 @@ class InitiateCall extends voice_1.default {
  : undefined,
  timeout: originateTimeout,
  version: 2,
- sessionExpireTime: this.session.expireTime,
- useWhisperFeature: false
+ sessionExpireTime: this.session.expireTime
  };
  if (otherCallRef) {
  // eslint-disable-next-line @typescript-eslint/prefer-nullish-coalescing
@@ -0,0 +1,13 @@
+ import VoiceStep, { VoiceEvent } from './voice';
+ interface INPUT {
+ textChunk: string;
+ isFinal: boolean;
+ flush?: boolean;
+ }
+ interface EVENT extends VoiceEvent {
+ exitId?: string;
+ }
+ export default class ProcessTtsChunk extends VoiceStep<INPUT, {}, EVENT> {
+ runStep(): Promise<void>;
+ }
+ export {};
@@ -0,0 +1,27 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const tslib_1 = require("tslib");
+ const voice_1 = tslib_1.__importDefault(require("./voice"));
+ class ProcessTtsChunk extends voice_1.default {
+ async runStep() {
+ const call = await this.fetchData();
+ const { textChunk, isFinal, flush } = this.data;
+ const command = {
+ name: 'process-tts-chunk',
+ params: {
+ chunk: textChunk,
+ isFinal,
+ flush,
+ }
+ };
+ this.log.info('ProcessTtsChunk command', { command });
+ this.triggers.local(`in/voice/${call.id}`, async (event) => {
+ this.log.info('Received event from voicer', event);
+ });
+ this.triggers.otherwise(async () => {
+ await this.sendCommands({ ...call, type: 'tts-chunk' }, [command]);
+ this.exitStep('next');
+ });
+ }
+ }
+ exports.default = ProcessTtsChunk;
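
Note: the new ProcessTtsChunk step added above takes a textChunk/isFinal/flush payload and forwards it to the voicer as a single 'process-tts-chunk' command. A minimal sketch of that mapping follows; only the field names and shapes come from the diff, the sample values are hypothetical:

// Sketch only: mirrors how ProcessTtsChunk turns its INPUT into a voicer command.
// Sample values are made up; field names follow the .d.ts and .js added in this diff.
interface ProcessTtsChunkInput {
    textChunk: string;
    isFinal: boolean;
    flush?: boolean;
}

function buildProcessTtsChunkCommand(data: ProcessTtsChunkInput) {
    return {
        name: 'process-tts-chunk',
        params: {
            chunk: data.textChunk, // note the rename: textChunk becomes chunk
            isFinal: data.isFinal,
            flush: data.flush,
        },
    };
}

// Example (hypothetical values): a non-final chunk streamed mid-sentence.
const exampleCommand = buildProcessTtsChunkCommand({ textChunk: 'One moment, please.', isFinal: false });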
@@ -46,8 +46,7 @@ class SayMessage extends voice_1.default {
  name: 'speak',
  params: {
  sections: speechSections,
- reporterTranscriptEventId: '',
- useWhisperFeature: true
+ reporterTranscriptEventId: ''
  }
  };
  const eventId = await this.transcript(call, {
@@ -0,0 +1,18 @@
+ import VoiceStep, { TODO, VoiceEvent } from './voice';
+ interface INPUT {
+ voiceId: string;
+ infiniteProcessing: boolean;
+ asr: TODO;
+ endpointing: number;
+ voiceSettings: any;
+ queryParams: any;
+ minConfidence: number;
+ confirmationConfidence?: number;
+ }
+ interface EVENT extends VoiceEvent {
+ phrases?: TODO[];
+ }
+ export default class StartTTSChunksProcessing extends VoiceStep<INPUT, {}, EVENT> {
+ runStep(): Promise<void>;
+ }
+ export {};
@@ -0,0 +1,68 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const tslib_1 = require("tslib");
+ const voice_1 = tslib_1.__importDefault(require("./voice"));
+ class StartTTSChunksProcessing extends voice_1.default {
+ async runStep() {
+ const call = await this.fetchData();
+ const { voiceId, infiniteProcessing, asr, endpointing, minConfidence, confirmationConfidence, voiceSettings: getSettings, queryParams: getParams } = this.data;
+ const grammar = {
+ id: this.currentStepId,
+ asr: {
+ ...asr.getSettings(call.asr),
+ config: {
+ endpointing,
+ }
+ },
+ };
+ const voiceSettings = await getSettings.call(this);
+ const queryParams = await getParams.call(this);
+ const command = {
+ name: 'start-TTS-chunks-processing',
+ params: {
+ voiceId,
+ infiniteProcessing,
+ grammar,
+ voiceSettings,
+ queryParams,
+ minConfidence,
+ confirmationConfidence
+ }
+ };
+ this.log.debug('StartTTSChunksProcessing command', { command });
+ this.triggers.local(`in/voice/${call.id}`, async (event) => {
+ this.log.info('Received event from voicer', event);
+ switch (event.params.type) {
+ case 'tts-chunks-playback-done': {
+ this.log.info('tts-chunks-playback-done', event.params);
+ return this.exitStep('next', {}, false);
+ }
+ case 'timeout': {
+ this.log.info('timeout', event.params);
+ return this.exitStep(event.params.type, {}, false);
+ }
+ case 'recognition': {
+ this.log.info('recognition', event.params);
+ const params = event.params;
+ const exitData = this.exitChoiceData('voice', params);
+ this.log.info('exitData', exitData);
+ return this.exitStep(event.params.type, exitData, false);
+ }
+ case 'hangup': {
+ await this.handleHangup(call);
+ return await this.waitConvEnd();
+ }
+ case 'cancel': {
+ return this.handleCancel();
+ }
+ case 'error':
+ return this.throwError(event.params.error);
+ }
+ });
+ this.triggers.otherwise(async () => {
+ await this.pauseRecording(call, command);
+ return this.exitFlow();
+ });
+ }
+ }
+ exports.default = StartTTSChunksProcessing;
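
For reference, the 'start-TTS-chunks-processing' command assembled by the new step above carries the voice, grammar/ASR, and confidence settings in one params object. A sketch of its approximate shape, under the assumption that the settings getters resolve to plain objects; field names are taken from the diff, all values below are illustrative:

// Sketch only: approximate payload sent by StartTTSChunksProcessing, not defaults from the package.
const exampleStartCommand = {
    name: 'start-TTS-chunks-processing',
    params: {
        voiceId: 'example-voice',       // TTS voice used to synthesize the streamed chunks
        infiniteProcessing: true,       // keep the session open for further chunks
        grammar: {
            id: 'current-step-id',      // this.currentStepId in the step
            asr: {
                // spread of asr.getSettings(call.asr) goes here
                config: { endpointing: 300 },
            },
        },
        voiceSettings: {},              // result of the user-supplied voiceSettings getter
        queryParams: {},                // result of the user-supplied queryParams getter
        minConfidence: 0.5,
        confirmationConfidence: 0.7,
    },
};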
package/dst/voice.d.ts CHANGED
@@ -1,9 +1,9 @@
  import { ICallback } from '@onereach/flow-sdk/dst/types/eventManager';
- import { IPromtpSection, IVoiceReporterTranscriptEventArgs } from '@onereach/flow-sdk/dst/types/reporter';
+ import { IPromtpSection as IPromptSection, IVoiceReporterTranscriptEventArgs } from '@onereach/flow-sdk/dst/types/reporter';
  import { IEvent } from '@onereach/flow-sdk/dst/types/event';
  import ConvStep, { IConversationData } from './step';
  import BasicError from '@onereach/flow-sdk/dst/errors/base';
- import { WITH_VAR } from "@onereach/flow-sdk/types/thread";
+ import { WITH_VAR } from '@onereach/flow-sdk/dst/types/thread';
  export type TODO = any;
  export interface SensitiveData {
  muteStep: boolean;
@@ -33,7 +33,7 @@ export interface IVoiceCall extends IConversationData {
  user: string;
  };
  }
- export type EventType = 'hangup' | 'ack' | 'error' | 'cancel' | 'background' | 'avm-detected' | 'recognition' | 'digit' | 'digits' | 'conference-start' | 'conference-end' | 'playback' | 'timeout' | 'record' | 'bridge' | 'bridge/ended' | 'is_flow_ready' | 'call' | 'dtmf-sent';
+ export type EventType = 'hangup' | 'ack' | 'error' | 'cancel' | 'background' | 'avm-detected' | 'recognition' | 'digit' | 'digits' | 'conference-start' | 'conference-end' | 'playback' | 'timeout' | 'record' | 'bridge' | 'bridge/ended' | 'is_flow_ready' | 'call' | 'dtmf-sent' | 'tts-chunks-playback-done';
  export declare class VoiceStepError extends BasicError {
  }
  export type VoicerError = Error & {
@@ -69,11 +69,11 @@ export interface CallStartEvent extends VoiceEvent {
  export interface HandleInterruptionParams<TParams extends VoiceEvent = VoiceEvent> {
  call: IVoiceCall;
  event: IEvent<TParams>;
- speechSections: IPromtpSection[];
- repromptsList?: {
+ speechSections: IPromptSection[];
+ repromptsList?: Array<{
  message?: string;
  fileName?: string;
- }[];
+ }>;
  reportingSettingsKey?: string;
  }
  export type VoiceEvents<TParams> = IEvent<TParams, WITH_VAR<'in/voice/'>> | IEvent<TParams, WITH_VAR<'in/voice/', '/event'>>;
@@ -87,9 +87,9 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
  sendCommands({ id, type, callback }: IVoiceCall, commands: TODO[]): Promise<void>;
  handleHeartbeat(call: IVoiceCall): Promise<void>;
  handleCancel(): void;
- extractSectionMessages(sections: IPromtpSection[], interruptionMetadata?: VoiceInterruptionMetadata): string;
+ extractSectionMessages(sections: IPromptSection[], interruptionMetadata?: VoiceInterruptionMetadata): string;
  extractPlayedSectionMessage(text: string, sectionDuration: number, playedTime: number): string;
- extractSectionFiles(sections: IPromtpSection[]): Array<{
+ extractSectionFiles(sections: IPromptSection[]): Array<{
  fileUrl: string;
  fileType: string;
  }>;
@@ -109,6 +109,8 @@ export default class VoiceStep<TIn = unknown, TOut = unknown, TParams extends Vo
  rptsHasMore({ repromptsList }: TODO): boolean;
  get rptsIndex(): number;
  get rptsStarted(): boolean;
+ canVoicerHeartbeat(call: IVoiceCall): boolean;
+ /** @deprecated use `this.canVoicerHeartbeat` instead */
  canVoicerHearbeat(call: IVoiceCall): boolean;
  canVoicerAck(call: IVoiceCall): boolean;
  getInterruptionMetadata(event: IEvent<TParams>): VoiceInterruptionMetadata | null;
package/dst/voice.js CHANGED
@@ -63,7 +63,7 @@ class VoiceStep extends step_1.default {
  step: { key: this.session.key, trd: this.isGlobal ? this.workerThreadId : this.thread.id } // response should be sent to this session
  },
  reporting: {
- ...this.session.getSessionRef(),
+ ...this.session.getSessionRef()
  }
  };
  const result = await this.thread.eventManager.emit(event, {
@@ -75,14 +75,14 @@
  throw new Error(`failed to send command to call: ${id}`);
  }
  async handleHeartbeat(call) {
- const allowHeartbeat = this.canVoicerHearbeat(call);
+ const allowHeartbeat = this.canVoicerHeartbeat(call);
  if (allowHeartbeat) {
  if (this.thread.background) {
  delete this.waits.timeout;
  return;
  }
- const expectHearbeatBefore = Date.now() + 290000;
- this.triggers.deadline(expectHearbeatBefore, () => {
+ const expectHeartbeatBefore = Date.now() + 290000;
+ this.triggers.deadline(expectHeartbeatBefore, () => {
  this.thread.background = true;
  if (call.ended) {
  this.log.warn('missing heartbeat, call is ended');
@@ -149,7 +149,7 @@ class VoiceStep extends step_1.default {
  const commands = [command];
  const stopRecording = (call.recordCall && sensitiveData?.muteStep);
  if (call.vv >= 2) {
- // newer voicer version automaically should stop/resume session recording
+ // newer voicer version automatically should stop/resume session recording
  command.params.sensitiveData = stopRecording ? sensitiveData : {};
  }
  else if (stopRecording) {
@@ -232,14 +232,14 @@
  EventValue: {
  eventId: interruptionMetadata.reporterTranscriptEventId,
  eventValue: {
- Message: reportingObject.message,
- },
- },
+ Message: reportingObject.message
+ }
+ }
  };
- this.log.debug(`Augment Transcript`, {
+ this.log.debug('Augment Transcript', {
  interruptionMetadata,
  updateReportingObject,
- shouldBeSendToHitl: this.process.cache.hitl != null,
+ shouldBeSendToHitl: this.process.cache.hitl != null
  });
  if (this.process.cache.hitl) {
  await this.process.cache.hitl.queueEvents([updateReportingObject]);
@@ -401,16 +401,20 @@
  get rptsStarted() {
  return this.rptsIndex !== 0;
  }
- canVoicerHearbeat(call) {
+ canVoicerHeartbeat(call) {
  return call.vv >= 1;
  }
+ /** @deprecated use `this.canVoicerHeartbeat` instead */
+ canVoicerHearbeat(call) {
+ return this.canVoicerHeartbeat(call);
+ }
  canVoicerAck(call) {
  return call.vv >= 2;
  }
  getInterruptionMetadata(event) {
- return event.params?.interruptionMetadata
- ?? event.params?.result?.interruptionMetadata
- ?? null;
+ return event.params?.interruptionMetadata ??
+ event.params?.result?.interruptionMetadata ??
+ null;
  }
  async handleInterruption(params) {
  const { call, event, speechSections, repromptsList = [], reportingSettingsKey = 'transcript' } = params;
@@ -423,7 +427,7 @@
  const current = repromptsList[this.rptsIndex - 1];
  sections.push({
  url: current?.fileName,
- text: current?.message,
+ text: current?.message
  });
  }
  else {
@@ -433,7 +437,7 @@
  action: 'Call Prompt',
  actionFromBot: true,
  sections,
- reportingSettingsKey,
+ reportingSettingsKey
  }, interruptionMetadata);
  }
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@onereach/step-voice",
- "version": "7.0.9-VOIC1575.9",
+ "version": "7.0.9-processttschunk.0",
  "author": "Roman Zolotarov <roman.zolotarov@onereach.com>",
  "contributors": [
  "Roman Zolotarov",