@fonoster/common 0.6.6 → 0.7.2
- package/dist/protos/applications.proto +4 -4
- package/dist/protos/calls.proto +34 -39
- package/dist/protos/voice.proto +49 -6
- package/dist/tts/DeepgramVoices.d.ts +22 -0
- package/dist/tts/DeepgramVoices.js +113 -0
- package/dist/tts/index.d.ts +1 -0
- package/dist/tts/index.js +1 -0
- package/dist/voice/Dial.js +1 -0
- package/dist/voice/PlaybackControl.d.ts +5 -5
- package/dist/voice/PlaybackControl.js +5 -5
- package/dist/voice/Stream.d.ts +3 -8
- package/dist/voice/Stream.js +0 -2
- package/dist/voice/StreamGather.d.ts +15 -0
- package/dist/voice/StreamGather.js +9 -0
- package/dist/voice/index.d.ts +1 -0
- package/dist/voice/index.js +1 -0
- package/dist/voice/voice.d.ts +18 -5
- package/dist/voice/voice.js +6 -0
- package/package.json +3 -3
package/dist/protos/applications.proto
CHANGED

@@ -39,7 +39,7 @@ service Applications {
 // The type of application
 enum ApplicationType {
   // Programmable Voice
-
+  EXTERNAL = 0;
 }
 
 message ProductContainer {
@@ -60,7 +60,7 @@ message Application {
   // Application type
   ApplicationType type = 3;
   // Endpoint for programmable voice
-  string
+  string endpoint = 4;
   // Text to speech product
   ProductContainer text_to_speech = 5;
   // Speech to text product
@@ -80,7 +80,7 @@ message CreateApplicationRequest {
   // Application type
   ApplicationType type = 2;
  // App URL for programmable voice
-  string
+  string endpoint = 3;
   // Text to speech product
   ProductContainer text_to_speech = 4;
   // Speech to text product
@@ -126,7 +126,7 @@ message UpdateApplicationRequest {
   // Application type
   ApplicationType type = 3;
   // App URL for programmable voice
-  string
+  string endpoint = 4;
   // Text to speech product
   ProductContainer text_to_speech = 5;
   // Speech to text product
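The programmable-voice URL field is now named `endpoint` across the Application messages. As a quick orientation, a hand-written TypeScript mirror of the CreateApplicationRequest fields shown above; this is not the generated client type, the camelCasing is mine, and ProductContainer is left opaque because its members are not part of this diff.

// Hand-written mirror of the proto fields above; illustrative only.
type ApplicationType = "EXTERNAL";
type ProductContainer = Record<string, unknown>; // members not shown in this diff

interface CreateApplicationRequest {
  type: ApplicationType;           // ApplicationType type = 2;
  endpoint: string;                // string endpoint = 3; (renamed in 0.7.x)
  textToSpeech?: ProductContainer; // ProductContainer text_to_speech = 4;
  speechToText?: ProductContainer; // field after the "Speech to text product" comment
}

// Example value; the URL is a placeholder.
const createApp: CreateApplicationRequest = {
  type: "EXTERNAL",
  endpoint: "https://example.com/voice",
  textToSpeech: {},
  speechToText: {}
};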
package/dist/protos/calls.proto
CHANGED

@@ -33,55 +33,40 @@ service Calls {
 }
 
 enum CallType {
-
-
-}
-
-enum HangupCause {
-  NORMAL_CLEARING = 0;
-  CALL_REJECTED = 1;
-  UNALLOCATED = 2;
-  NO_USER_RESPONSE = 3;
-  NO_ROUTE_DESTINATION = 4;
-  NO_ANSWER = 5;
-  USER_BUSY = 6;
-  NOT_ACCEPTABLE_HERE = 7;
-  SERVICE_UNAVAILABLE = 8;
-  INVALID_NUMBER_FORMAT = 9;
-  UNKNOWN = 10;
+  SIP_ORIGINATED = 0;
+  API_ORIGINATED = 1;
 }
 
 enum CallStatus {
-
-
-
-
-
-
-
-
-
-
-
-  // TODO: Refactor to avoid clash with HangupCause
-  // UNKNOWN = 10;
+  UNKNOWN = 0;
+  NORMAL_CLEARING = 1;
+  CALL_REJECTED = 2;
+  UNALLOCATED = 3;
+  NO_USER_RESPONSE = 4;
+  NO_ROUTE_DESTINATION = 5;
+  NO_ANSWER = 6;
+  USER_BUSY = 7;
+  NOT_ACCEPTABLE_HERE = 8;
+  SERVICE_UNAVAILABLE = 9;
+  INVALID_NUMBER_FORMAT = 10;
 }
 
 enum CallDirection {
-
-
+  FROM_PSTN = 0;
+  TO_PSTN = 1;
+  INTRA_NETWORK = 2;
 }
 
 // Message for a Call Detail Record
 message CallDetailRecord {
   // The unique identifier of the Call
   string ref = 1;
+  // Call identifier from the SIP stack
+  string call_id = 2;
   // The call type
-  CallType type =
+  CallType type = 3;
   // The call status
-  CallStatus status =
-  // Hangup cause
-  HangupCause hangup_cause = 4;
+  CallStatus status = 4;
   // Start time of the call
   int32 started_at = 5;
   // End time of the call
@@ -104,6 +89,8 @@ message CreateCallRequest {
   string to = 2;
   // Optional application reference
   string app_ref = 3;
+  // Optional timeout in seconds
+  int32 timeout = 4;
 }
 
 // The response message for Calls.Call
@@ -154,8 +141,16 @@ message TrackCallRequest {
 
 // The response message for Calls.TrackCall
 message TrackCallResponse {
-  // The
-
-
-
+  // The status of the dial
+  enum Status {
+    TRYING = 0;
+    CANCEL = 1;
+    ANSWER = 2;
+    BUSY = 3;
+    PROGRESS = 4;
+    NOANSWER = 5;
+    // Maps from Asterisk's CHANUNAVAIL and CONGESTION
+    FAILED = 6;
+  }
+  Status status = 1;
 }
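Calls.CreateCall now accepts an optional timeout. A minimal TypeScript sketch of the request shape per the hunk above, camelCased by hand; `from` is assumed to be field 1 (only `to` onward appears in the hunk), and this is not the generated client type.

// Plain-object mirror of CreateCallRequest (illustrative, not the generated type).
interface CreateCallRequest {
  from: string;     // assumed field 1; not visible in the hunk above
  to: string;       // string to = 2;
  appRef: string;   // string app_ref = 3;
  timeout?: number; // int32 timeout = 4; optional timeout in seconds (new in 0.7.x)
}

const call: CreateCallRequest = {
  from: "+15555550100", // placeholder numbers
  to: "+15555550101",
  appRef: "my-application-ref",
  timeout: 30
};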
package/dist/protos/voice.proto
CHANGED

@@ -250,6 +250,7 @@ message DialStatus {
     BUSY = 3;
     PROGRESS = 4;
     NOANSWER = 5;
+    // Maps from Asterisk's CHANUNAVAIL and CONGESTION
     FAILED = 6;
   }
   Status status = 2;
@@ -273,9 +274,6 @@ message StartStreamRequest {
     WAV = 0;
   }
   StreamAudioFormat format = 3;
-
-  // Enable VAD (Voice Activity Detection)
-  bool enableVad = 4;
 }
 
 // Response to a start stream request
@@ -309,8 +307,6 @@ message StreamPayload {
     AUDIO_IN = 0;
     AUDIO_OUT = 1;
     ERROR = 2;
-    VOICE_ACTIVITY_START = 3;
-    VOICE_ACTIVITY_END = 4;
   }
   StreamMessageType type = 3;
 
@@ -326,6 +322,38 @@ message StreamPayload {
   string message = 6;
 }
 
+// Request for Stream Gather
+message StartStreamGatherRequest {
+  // The session reference generated by the Media Server
+  string session_ref = 1;
+
+  // The source of the gather
+  enum StreamGatherSource {
+    SPEECH = 0;
+    DTMF = 1;
+    SPEECH_AND_DTMF = 2;
+  }
+  StreamGatherSource source = 2;
+}
+
+// Request to stop a Stream Gather
+message StopStreamGatherRequest {
+  // The session reference generated by the Media Server
+  string session_ref = 1;
+}
+
+// Response to Stream Gather request
+message StreamGatherPayload {
+  // The session reference generated by the Media Server
+  string session_ref = 1;
+
+  // The gathered speech or a single digit
+  oneof content {
+    string speech = 2;
+    string digit = 3;
+  }
+}
+
 // VoiceInStream is the input stream for the voice service
 message VoiceInStream {
   oneof content {
@@ -369,7 +397,16 @@ message VoiceInStream {
     StartStreamResponse start_stream_response = 13;
 
     // Message for the bidirectional streams
-    StreamPayload stream_payload =
+    StreamPayload stream_payload = 14;
+
+    // Response to start a stream gather
+    VerbResponse start_stream_gather_response = 15;
+
+    // Response to stop a stream gather
+    VerbResponse stop_stream_gather_response = 16;
+
+    // Message with payload for the stream gather
+    StreamGatherPayload stream_gather_payload = 17;
   }
 }
 
@@ -417,5 +454,11 @@ message VoiceOutStream {
 
     // Message for the bidirectional streams
     StreamPayload stream_payload = 16;
+
+    // Request to start a stream gather
+    StartStreamGatherRequest start_stream_gather_request = 17;
+
+    // Request to stop a stream gather
+    StopStreamGatherRequest stop_stream_gather_request = 18;
   }
 }
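The new StreamGather messages deliver either a transcript or a single DTMF digit through a oneof. A rough TypeScript sketch of dispatching on that content, written against a hand-made mirror of StreamGatherPayload rather than the generated type.

// Hand-made mirror of StreamGatherPayload: the proto oneof becomes two optional fields.
interface StreamGatherPayloadLike {
  sessionRef: string;
  speech?: string; // string speech = 2;
  digit?: string;  // string digit = 3;
}

function handleGather(payload: StreamGatherPayloadLike): void {
  if (payload.digit !== undefined) {
    console.log(`DTMF ${payload.digit} on session ${payload.sessionRef}`);
  } else if (payload.speech !== undefined) {
    console.log(`Speech on session ${payload.sessionRef}: ${payload.speech}`);
  }
}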
package/dist/tts/DeepgramVoices.d.ts
ADDED

@@ -0,0 +1,22 @@
+import { VoiceGender, VoiceLanguage } from "./types";
+declare enum DeepgramVoice {
+    AURA_ASTERIA_EN = "aura-asteria-en",
+    AURA_LUNA_EN = "aura-luna-en",
+    AURA_STELLA_EN = "aura-stella-en",
+    AURA_ATHENA_EN = "aura-athena-en",
+    AURA_HERA_EN = "aura-hera-en",
+    AURA_ORION_EN = "aura-orion-en",
+    AURA_ARCAS_EN = "aura-arcas-en",
+    AURA_PERSEUS_EN = "aura-perseus-en",
+    AURA_ANGUS_EN = "aura-angus-en",
+    AURA_ORPHEUS_EN = "aura-orpheus-en",
+    AURA_HELIOS_EN = "aura-helios-en",
+    AURA_ZEUS_EN = "aura-zeus-en"
+}
+declare const DeepgramVoiceDetails: {
+    name: DeepgramVoice;
+    displayName: string;
+    languageCode: VoiceLanguage;
+    gender: VoiceGender;
+}[];
+export { DeepgramVoice, DeepgramVoiceDetails };
package/dist/tts/DeepgramVoices.js
ADDED

@@ -0,0 +1,113 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DeepgramVoiceDetails = exports.DeepgramVoice = void 0;
+/* eslint-disable sonarjs/no-duplicate-string */
+/*
+ * Copyright (C) 2024 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const types_1 = require("./types");
+var DeepgramVoice;
+(function (DeepgramVoice) {
+    DeepgramVoice["AURA_ASTERIA_EN"] = "aura-asteria-en";
+    DeepgramVoice["AURA_LUNA_EN"] = "aura-luna-en";
+    DeepgramVoice["AURA_STELLA_EN"] = "aura-stella-en";
+    DeepgramVoice["AURA_ATHENA_EN"] = "aura-athena-en";
+    DeepgramVoice["AURA_HERA_EN"] = "aura-hera-en";
+    DeepgramVoice["AURA_ORION_EN"] = "aura-orion-en";
+    DeepgramVoice["AURA_ARCAS_EN"] = "aura-arcas-en";
+    DeepgramVoice["AURA_PERSEUS_EN"] = "aura-perseus-en";
+    DeepgramVoice["AURA_ANGUS_EN"] = "aura-angus-en";
+    DeepgramVoice["AURA_ORPHEUS_EN"] = "aura-orpheus-en";
+    DeepgramVoice["AURA_HELIOS_EN"] = "aura-helios-en";
+    DeepgramVoice["AURA_ZEUS_EN"] = "aura-zeus-en";
+})(DeepgramVoice || (exports.DeepgramVoice = DeepgramVoice = {}));
+const DeepgramVoiceDetails = [
+    {
+        name: DeepgramVoice.AURA_ASTERIA_EN,
+        displayName: "Asteria",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_LUNA_EN,
+        displayName: "Luna",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_STELLA_EN,
+        displayName: "Stella",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_ATHENA_EN,
+        displayName: "Athena",
+        languageCode: types_1.VoiceLanguage.EN_GB,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_HERA_EN,
+        displayName: "Hera",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_ORION_EN,
+        displayName: "Orion",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ARCAS_EN,
+        displayName: "Arcas",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_PERSEUS_EN,
+        displayName: "Perseus",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ANGUS_EN,
+        displayName: "Angus",
+        languageCode: types_1.VoiceLanguage.EN_IE,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ORPHEUS_EN,
+        displayName: "Orpheus",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_HELIOS_EN,
+        displayName: "Helios",
+        languageCode: types_1.VoiceLanguage.EN_GB,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ZEUS_EN,
+        displayName: "Zeus",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    }
+];
+exports.DeepgramVoiceDetails = DeepgramVoiceDetails;
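The new DeepgramVoices module exports both the voice enum and a details table. A short lookup sketch; the `@fonoster/common` import path assumes these end up re-exported from the package root, which this diff only confirms for the tts index (see the index.js change below).

import { DeepgramVoice, DeepgramVoiceDetails } from "@fonoster/common"; // assumed export path

// Find the metadata row for a given Deepgram voice.
const asteria = DeepgramVoiceDetails.find(
  (voice) => voice.name === DeepgramVoice.AURA_ASTERIA_EN
);

console.log(asteria?.displayName);                   // "Asteria"
console.log(asteria?.languageCode, asteria?.gender); // EN_US and FEMALE per the table above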
package/dist/tts/index.d.ts
CHANGED
package/dist/tts/index.js
CHANGED
@@ -34,4 +34,5 @@ Object.defineProperty(exports, "__esModule", { value: true });
 */
 __exportStar(require("./GoogleVoices"), exports);
 __exportStar(require("./AzureVoices"), exports);
+__exportStar(require("./DeepgramVoices"), exports);
 __exportStar(require("./types"), exports);
package/dist/voice/Dial.js
CHANGED
@@ -15,5 +15,6 @@ var DialStatus;
     DialStatus["BUSY"] = "BUSY";
     DialStatus["PROGRESS"] = "PROGRESS";
     DialStatus["NOANSWER"] = "NOANSWER";
+    // Maps from Asterisk's CHANUNAVAIL and CONGESTION
    DialStatus["FAILED"] = "FAILED";
 })(DialStatus || (exports.DialStatus = DialStatus = {}));
package/dist/voice/PlaybackControl.d.ts
CHANGED

@@ -1,10 +1,10 @@
 import { VerbRequest } from "./Verb";
 declare enum PlaybackControlAction {
-    STOP = "
-    RESTART = "
-    PAUSE = "
-    UNPAUSE = "
-    FORWARD = "
+    STOP = "STOP",
+    RESTART = "RESTART",
+    PAUSE = "PAUSE",
+    UNPAUSE = "UNPAUSE",
+    FORWARD = "FORWARD"
 }
 type PlaybackControlRequest = VerbRequest & {
     playbackRef: string;
package/dist/voice/PlaybackControl.js
CHANGED

@@ -3,9 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.PlaybackControlAction = void 0;
 var PlaybackControlAction;
 (function (PlaybackControlAction) {
-    PlaybackControlAction["STOP"] = "
-    PlaybackControlAction["RESTART"] = "
-    PlaybackControlAction["PAUSE"] = "
-    PlaybackControlAction["UNPAUSE"] = "
-    PlaybackControlAction["FORWARD"] = "
+    PlaybackControlAction["STOP"] = "STOP";
+    PlaybackControlAction["RESTART"] = "RESTART";
+    PlaybackControlAction["PAUSE"] = "PAUSE";
+    PlaybackControlAction["UNPAUSE"] = "UNPAUSE";
+    PlaybackControlAction["FORWARD"] = "FORWARD";
 })(PlaybackControlAction || (exports.PlaybackControlAction = PlaybackControlAction = {}));
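PlaybackControlAction now serializes to the uppercase action names. A minimal sketch of building a request with it; the import path and the fields other than playbackRef (sessionRef, action) are assumptions, since VerbRequest and the rest of PlaybackControlRequest are not shown in this diff.

import { PlaybackControlAction } from "@fonoster/common"; // assumed export path

// Illustrative request body; field names beyond playbackRef are assumptions.
const pauseRequest = {
  sessionRef: "session-ref",          // assumed VerbRequest field
  playbackRef: "playback-ref",        // from PlaybackControlRequest above
  action: PlaybackControlAction.PAUSE // now the string "PAUSE"
};

console.log(pauseRequest.action); // "PAUSE"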
package/dist/voice/Stream.d.ts
CHANGED
@@ -10,14 +10,11 @@ declare enum StreamAudioFormat {
 declare enum StreamMessageType {
     AUDIO_IN = "AUDIO_IN",
     AUDIO_OUT = "AUDIO_OUT",
-    ERROR = "ERROR"
-    VOICE_ACTIVITY_START = "VOICE_ACTIVITY_START",
-    VOICE_ACTIVITY_STOP = "VOICE_ACTIVITY_STOP"
+    ERROR = "ERROR"
 }
 type StreamOptions = {
-    direction
-    format
-    enableVad: boolean;
+    direction?: StreamDirection;
+    format?: StreamAudioFormat;
 };
 type StartStreamRequest = VerbRequest & StreamOptions;
 type StartStreamResponse = VerbResponse & {
@@ -29,10 +26,8 @@ type StopStreamRequest = VerbRequest & {
 type StreamPayload = {
     sessionRef: string;
     streamRef: string;
-    direction: StreamDirection;
     format: StreamAudioFormat;
     type: StreamMessageType;
-    enableVad?: boolean;
     data?: Buffer;
     code?: string;
     message?: string;
package/dist/voice/Stream.js
CHANGED
@@ -34,6 +34,4 @@ var StreamMessageType;
     StreamMessageType["AUDIO_IN"] = "AUDIO_IN";
     StreamMessageType["AUDIO_OUT"] = "AUDIO_OUT";
     StreamMessageType["ERROR"] = "ERROR";
-    StreamMessageType["VOICE_ACTIVITY_START"] = "VOICE_ACTIVITY_START";
-    StreamMessageType["VOICE_ACTIVITY_STOP"] = "VOICE_ACTIVITY_STOP";
 })(StreamMessageType || (exports.StreamMessageType = StreamMessageType = {}));
package/dist/voice/StreamGather.d.ts
ADDED

@@ -0,0 +1,15 @@
+import { VerbRequest, VerbResponse } from "./Verb";
+declare enum StreamGatherSource {
+    SPEECH = "speech",
+    DTMF = "dtmf",
+    SPEECH_AND_DTMF = "speech,dtmf"
+}
+type StreamGatherOptions = {
+    source?: StreamGatherSource;
+};
+type StartStreamGatherRequest = VerbRequest & StreamGatherOptions;
+type StreamGatherPayload = VerbResponse & {
+    speech?: string;
+    digit?: string;
+};
+export { StreamGatherSource, StreamGatherPayload, StartStreamGatherRequest, StreamGatherOptions };
package/dist/voice/StreamGather.js
ADDED

@@ -0,0 +1,9 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.StreamGatherSource = void 0;
+var StreamGatherSource;
+(function (StreamGatherSource) {
+    StreamGatherSource["SPEECH"] = "speech";
+    StreamGatherSource["DTMF"] = "dtmf";
+    StreamGatherSource["SPEECH_AND_DTMF"] = "speech,dtmf";
+})(StreamGatherSource || (exports.StreamGatherSource = StreamGatherSource = {}));
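StreamGather also ships TypeScript-side types: a source enum and an options type with an optional source. A tiny sketch of choosing a combined speech-and-DTMF gather; the import path is an assumption.

import { StreamGatherSource, StreamGatherOptions } from "@fonoster/common"; // assumed export path

// Per dist/voice/StreamGather.d.ts, source is optional; defaults are decided elsewhere.
const gatherOptions: StreamGatherOptions = {
  source: StreamGatherSource.SPEECH_AND_DTMF // serialized as "speech,dtmf"
};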
package/dist/voice/index.d.ts
CHANGED
package/dist/voice/index.js
CHANGED
package/dist/voice/voice.d.ts
CHANGED
@@ -6,11 +6,12 @@ import { PlayDtmfRequest } from "./PlayDtmf";
 import { RecordRequest, RecordResponse } from "./Record";
 import { SayRequest, SayResponse } from "./Say";
 import { StartStreamRequest, StartStreamResponse, StopStreamRequest, StreamPayload } from "./Stream";
+import { StartStreamGatherRequest, StreamGatherPayload } from "./StreamGather";
 import { VerbRequest, VerbResponse, VoiceRequest } from "./Verb";
 import { GrpcError } from "../GrpcError";
-declare const DATA
-declare const END
-declare const ERROR
+declare const DATA = "data";
+declare const END = "end";
+declare const ERROR = "error";
 declare enum StreamEvent {
     DATA = "data",
     END = "end",
@@ -42,12 +43,18 @@ declare enum StreamContent {
     START_STREAM_REQUEST = "startStreamRequest",
     START_STREAM_RESPONSE = "startStreamResponse",
     STOP_STREAM_REQUEST = "stopStreamRequest",
-
+    STOP_STREAM_RESPONSE = "stopStreamResponse",
+    STREAM_PAYLOAD = "streamPayload",
+    START_STREAM_GATHER_REQUEST = "startStreamGatherRequest",
+    START_STREAM_GATHER_RESPONSE = "startStreamGatherResponse",
+    STOP_STREAM_GATHER_REQUEST = "stopStreamGatherRequest",
+    STOP_STREAM_GATHER_RESPONSE = "stopStreamGatherResponse",
+    STREAM_GATHER_PAYLOAD = "streamGatherPayload"
 }
 type VoiceClientConfig = {
     appRef: string;
     accessKeyId: string;
-
+    endpoint: string;
     ingressNumber: string;
     callerName: string;
     callerNumber: string;
@@ -73,6 +80,9 @@ type VoiceIn = {
     };
     startStreamResponse?: StartStreamResponse;
     streamPayload?: StreamPayload;
+    streamGatherPayload?: StreamGatherPayload;
+    startStreamGatherResponse?: VerbResponse;
+    stopStreamGatherResponse?: VerbResponse;
 };
 type VoiceOut = {
     answerRequest?: VerbRequest;
@@ -89,10 +99,13 @@ type VoiceOut = {
     startStreamRequest?: StartStreamRequest;
     stopStreamRequest?: StopStreamRequest;
     streamPayload?: StreamPayload;
+    startStreamGatherRequest?: StartStreamGatherRequest;
+    stopStreamGatherRequest?: VerbRequest;
 };
 type BaseVoiceStream<T, W> = {
     removeListener: (e: StreamEvent, cb: (voice: T) => void) => void;
     on: (e: StreamEvent, cb: (voice: T) => void) => void;
+    once: (e: StreamEvent, cb: (voice: T) => void) => void;
     write: (voice: W) => void;
     end: () => void;
 };
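voice.d.ts threads the gather payload through VoiceIn/VoiceOut and adds a once listener to the stream type. A sketch of waiting for a single gather result on a VoiceIn-shaped stream; whether StreamEvent and VoiceIn are exported, and where such a stream comes from, is outside this diff, so both are assumptions here.

import { StreamEvent, VoiceIn } from "@fonoster/common"; // assumed exports

// A stream shaped like BaseVoiceStream<VoiceIn, VoiceOut> obtained elsewhere
// (for example from the voice-server integration); its origin is not part of this diff.
declare const voiceStream: {
  once: (e: StreamEvent, cb: (voice: VoiceIn) => void) => void;
};

voiceStream.once(StreamEvent.DATA, (voice) => {
  const gather = voice.streamGatherPayload;
  if (gather?.digit) {
    console.log(`Caller pressed ${gather.digit}`);
  } else if (gather?.speech) {
    console.log(`Caller said: ${gather.speech}`);
  }
});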
package/dist/voice/voice.js
CHANGED
@@ -40,5 +40,11 @@ var StreamContent;
     StreamContent["START_STREAM_REQUEST"] = "startStreamRequest";
     StreamContent["START_STREAM_RESPONSE"] = "startStreamResponse";
     StreamContent["STOP_STREAM_REQUEST"] = "stopStreamRequest";
+    StreamContent["STOP_STREAM_RESPONSE"] = "stopStreamResponse";
     StreamContent["STREAM_PAYLOAD"] = "streamPayload";
+    StreamContent["START_STREAM_GATHER_REQUEST"] = "startStreamGatherRequest";
+    StreamContent["START_STREAM_GATHER_RESPONSE"] = "startStreamGatherResponse";
+    StreamContent["STOP_STREAM_GATHER_REQUEST"] = "stopStreamGatherRequest";
+    StreamContent["STOP_STREAM_GATHER_RESPONSE"] = "stopStreamGatherResponse";
+    StreamContent["STREAM_GATHER_PAYLOAD"] = "streamGatherPayload";
 })(StreamContent || (exports.StreamContent = StreamContent = {}));
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fonoster/common",
-  "version": "0.6.6",
+  "version": "0.7.2",
   "description": "Common library for Fonoster projects",
   "author": "Pedro Sanders <psanders@fonoster.com>",
   "homepage": "https://github.com/fonoster/fonoster#readme",
@@ -18,7 +18,7 @@
     "clean": "rimraf ./dist node_modules tsconfig.tsbuildinfo"
   },
   "dependencies": {
-    "@fonoster/logger": "^0.
+    "@fonoster/logger": "^0.7.2",
     "@grpc/grpc-js": "~1.10.6",
     "@grpc/proto-loader": "^0.7.12",
     "grpc-health-check": "^2.0.2",
@@ -42,5 +42,5 @@
   "devDependencies": {
     "@types/nodemailer": "^6.4.14"
   },
-  "gitHead": "
+  "gitHead": "6d858d9920132dfef0dba8965e5005f44de61563"
 }