@fonoster/common 0.6.6 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/protos/applications.proto +3 -3
- package/dist/protos/calls.proto +34 -39
- package/dist/protos/voice.proto +49 -1
- package/dist/tts/DeepgramVoices.d.ts +22 -0
- package/dist/tts/DeepgramVoices.js +113 -0
- package/dist/tts/index.d.ts +1 -0
- package/dist/tts/index.js +1 -0
- package/dist/voice/Dial.js +1 -0
- package/dist/voice/StreamGather.d.ts +15 -0
- package/dist/voice/StreamGather.js +9 -0
- package/dist/voice/index.d.ts +1 -0
- package/dist/voice/index.js +1 -0
- package/dist/voice/voice.d.ts +17 -5
- package/dist/voice/voice.js +5 -0
- package/package.json +3 -3
package/dist/protos/applications.proto
CHANGED

@@ -60,7 +60,7 @@ message Application {
   // Application type
   ApplicationType type = 3;
   // Endpoint for programmable voice
-  string
+  string endpoint = 4;
   // Text to speech product
   ProductContainer text_to_speech = 5;
   // Speech to text product

@@ -80,7 +80,7 @@ message CreateApplicationRequest {
   // Application type
   ApplicationType type = 2;
   // App URL for programmable voice
-  string
+  string endpoint = 3;
   // Text to speech product
   ProductContainer text_to_speech = 4;
   // Speech to text product

@@ -126,7 +126,7 @@ message UpdateApplicationRequest {
   // Application type
   ApplicationType type = 3;
   // App URL for programmable voice
-  string
+  string endpoint = 4;
   // Text to speech product
   ProductContainer text_to_speech = 5;
   // Speech to text product

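These hunks change the programmable-voice address field to endpoint across Application, CreateApplicationRequest, and UpdateApplicationRequest. A minimal sketch of the resulting request shape, hand-written from the fields visible above (the camelCase mapping and the example values are assumptions, not the package's generated types):

// Hand-written mirror for illustration only; ApplicationType and
// ProductContainer members are not shown in this diff, so they stay abstract.
type ApplicationType = string;
type ProductContainer = Record<string, unknown>;

interface CreateApplicationRequest {
  type: ApplicationType;
  endpoint: string;                 // the endpoint field introduced in this hunk
  textToSpeech?: ProductContainer;
  speechToText?: ProductContainer;
  // other fields omitted because they fall outside the hunk
}

const request: CreateApplicationRequest = {
  type: "EXTERNAL",                        // assumed value, for illustration
  endpoint: "voiceapp.example.com:50061"   // hypothetical endpoint
};
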
package/dist/protos/calls.proto
CHANGED
@@ -33,55 +33,40 @@ service Calls {
 }

 enum CallType {
-
-
-}
-
-enum HangupCause {
-  NORMAL_CLEARING = 0;
-  CALL_REJECTED = 1;
-  UNALLOCATED = 2;
-  NO_USER_RESPONSE = 3;
-  NO_ROUTE_DESTINATION = 4;
-  NO_ANSWER = 5;
-  USER_BUSY = 6;
-  NOT_ACCEPTABLE_HERE = 7;
-  SERVICE_UNAVAILABLE = 8;
-  INVALID_NUMBER_FORMAT = 9;
-  UNKNOWN = 10;
+  SIP_ORIGINATED = 0;
+  API_ORIGINATED = 1;
 }

 enum CallStatus {
-
-
-
-
-
-
-
-
-
-
-
-  // TODO: Refactor to avoid clash with HangupCause
-  // UNKNOWN = 10;
+  UNKNOWN = 0;
+  NORMAL_CLEARING = 1;
+  CALL_REJECTED = 2;
+  UNALLOCATED = 3;
+  NO_USER_RESPONSE = 4;
+  NO_ROUTE_DESTINATION = 5;
+  NO_ANSWER = 6;
+  USER_BUSY = 7;
+  NOT_ACCEPTABLE_HERE = 8;
+  SERVICE_UNAVAILABLE = 9;
+  INVALID_NUMBER_FORMAT = 10;
 }

 enum CallDirection {
-
-
+  FROM_PSTN = 0;
+  TO_PSTN = 1;
+  INTRA_NETWORK = 2;
 }

 // Message for a Call Detail Record
 message CallDetailRecord {
   // The unique identifier of the Call
   string ref = 1;
+  // Call identifier from the SIP stack
+  string call_id = 2;
   // The call type
-  CallType type =
+  CallType type = 3;
   // The call status
-  CallStatus status =
-  // Hangup cause
-  HangupCause hangup_cause = 4;
+  CallStatus status = 4;
   // Start time of the call
   int32 started_at = 5;
   // End time of the call

@@ -104,6 +89,8 @@ message CreateCallRequest {
   string to = 2;
   // Optional application reference
   string app_ref = 3;
+  // Optional timeout in seconds
+  int32 timeout = 4;
 }

 // The response message for Calls.Call

@@ -154,8 +141,16 @@ message TrackCallRequest {

 // The response message for Calls.TrackCall
 message TrackCallResponse {
-  // The
-
-
-
+  // The status of the dial
+  enum Status {
+    TRYING = 0;
+    CANCEL = 1;
+    ANSWER = 2;
+    BUSY = 3;
+    PROGRESS = 4;
+    NOANSWER = 5;
+    // Maps from Asterisk's CHANUNAVAIL and CONGESTION
+    FAILED = 6;
+  }
+  Status status = 1;
 }

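In calls.proto, CallType now only distinguishes SIP-originated from API-originated calls, CallStatus takes over the former HangupCause values (with UNKNOWN moved to 0), CallDetailRecord gains a call_id, CreateCallRequest gains an optional timeout, and TrackCallResponse gets its own Status enum. A rough TypeScript mirror of the updated shapes, hand-written from the hunks above (camelCase mapping assumed; fields outside the hunks are omitted):

// Hand-written mirrors for illustration only, not the generated client types.
type CallType = "SIP_ORIGINATED" | "API_ORIGINATED";
type CallStatus =
  | "UNKNOWN" | "NORMAL_CLEARING" | "CALL_REJECTED" | "UNALLOCATED"
  | "NO_USER_RESPONSE" | "NO_ROUTE_DESTINATION" | "NO_ANSWER" | "USER_BUSY"
  | "NOT_ACCEPTABLE_HERE" | "SERVICE_UNAVAILABLE" | "INVALID_NUMBER_FORMAT";

interface CreateCallRequest {
  // field 1 falls outside the hunk and is omitted here
  to: string;
  appRef: string;
  timeout?: number; // new in 0.7.0, in seconds
}

interface CallDetailRecord {
  ref: string;
  callId: string;    // new: call identifier from the SIP stack
  type: CallType;
  status: CallStatus;
  startedAt: number;
  // remaining fields unchanged and omitted
}
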
package/dist/protos/voice.proto
CHANGED
@@ -250,6 +250,7 @@ message DialStatus {
     BUSY = 3;
     PROGRESS = 4;
     NOANSWER = 5;
+    // Maps from Asterisk's CHANUNAVAIL and CONGESTION
     FAILED = 6;
   }
   Status status = 2;

@@ -326,6 +327,38 @@ message StreamPayload {
   string message = 6;
 }

+// Request for Stream Gather
+message StartStreamGatherRequest {
+  // The session reference generated by the Media Server
+  string session_ref = 1;
+
+  // The source of the gather
+  enum StreamGatherSource {
+    SPEECH = 0;
+    DTMF = 1;
+    SPEECH_AND_DTMF = 2;
+  }
+  StreamGatherSource source = 2;
+}
+
+// Request to stop a Stream Gather
+message StopStreamGatherRequest {
+  // The session reference generated by the Media Server
+  string session_ref = 1;
+}
+
+// Response to Stream Gather request
+message StreamGatherPayload {
+  // The session reference generated by the Media Server
+  string session_ref = 1;
+
+  // The gathered speech or a single digit
+  oneof content {
+    string speech = 2;
+    string digit = 3;
+  }
+}
+
 // VoiceInStream is the input stream for the voice service
 message VoiceInStream {
   oneof content {

@@ -369,7 +402,16 @@ message VoiceInStream {
     StartStreamResponse start_stream_response = 13;

     // Message for the bidirectional streams
-    StreamPayload stream_payload =
+    StreamPayload stream_payload = 14;
+
+    // Response to start a stream gather
+    VerbResponse start_stream_gather_response = 15;
+
+    // Response to stop a stream gather
+    VerbResponse stop_stream_gather_response = 16;
+
+    // Message with payload for the stream gather
+    StreamGatherPayload stream_gather_payload = 17;
   }
 }

@@ -417,5 +459,11 @@ message VoiceOutStream {

     // Message for the bidirectional streams
     StreamPayload stream_payload = 16;
+
+    // Request to start a stream gather
+    StartStreamGatherRequest start_stream_gather_request = 17;
+
+    // Request to stop a stream gather
+    StopStreamGatherRequest stop_stream_gather_request = 18;
   }
 }

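voice.proto adds a start/stop/payload trio for Stream Gather and wires it into the existing bidirectional VoiceInStream/VoiceOutStream messages. A rough sketch of the round trip, using hand-written TypeScript mirrors of the new messages (camelCase mapping assumed; the session reference is made up):

// Hand-written mirrors for illustration only.
enum StreamGatherSource { SPEECH = 0, DTMF = 1, SPEECH_AND_DTMF = 2 }

interface StartStreamGatherRequest {
  sessionRef: string;
  source: StreamGatherSource;
}

interface StreamGatherPayload {
  sessionRef: string;
  speech?: string; // set when the oneof carries gathered speech
  digit?: string;  // set when the oneof carries a single DTMF digit
}

// Outbound (VoiceOutStream): ask the Media Server to gather both sources.
const startGather: StartStreamGatherRequest = {
  sessionRef: "hypothetical-session-ref",
  source: StreamGatherSource.SPEECH_AND_DTMF
};

// Inbound (VoiceInStream): each stream_gather_payload carries speech or a digit.
function onStreamGatherPayload(payload: StreamGatherPayload) {
  if (payload.speech !== undefined) console.log("speech:", payload.speech);
  if (payload.digit !== undefined) console.log("digit:", payload.digit);
}
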
package/dist/tts/DeepgramVoices.d.ts
ADDED

@@ -0,0 +1,22 @@
+import { VoiceGender, VoiceLanguage } from "./types";
+declare enum DeepgramVoice {
+    AURA_ASTERIA_EN = "aura-asteria-en",
+    AURA_LUNA_EN = "aura-luna-en",
+    AURA_STELLA_EN = "aura-stella-en",
+    AURA_ATHENA_EN = "aura-athena-en",
+    AURA_HERA_EN = "aura-hera-en",
+    AURA_ORION_EN = "aura-orion-en",
+    AURA_ARCAS_EN = "aura-arcas-en",
+    AURA_PERSEUS_EN = "aura-perseus-en",
+    AURA_ANGUS_EN = "aura-angus-en",
+    AURA_ORPHEUS_EN = "aura-orpheus-en",
+    AURA_HELIOS_EN = "aura-helios-en",
+    AURA_ZEUS_EN = "aura-zeus-en"
+}
+declare const DeepgramVoiceDetails: {
+    name: DeepgramVoice;
+    displayName: string;
+    languageCode: VoiceLanguage;
+    gender: VoiceGender;
+}[];
+export { DeepgramVoice, DeepgramVoiceDetails };

package/dist/tts/DeepgramVoices.js
ADDED

@@ -0,0 +1,113 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DeepgramVoiceDetails = exports.DeepgramVoice = void 0;
+/* eslint-disable sonarjs/no-duplicate-string */
+/*
+ * Copyright (C) 2024 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const types_1 = require("./types");
+var DeepgramVoice;
+(function (DeepgramVoice) {
+    DeepgramVoice["AURA_ASTERIA_EN"] = "aura-asteria-en";
+    DeepgramVoice["AURA_LUNA_EN"] = "aura-luna-en";
+    DeepgramVoice["AURA_STELLA_EN"] = "aura-stella-en";
+    DeepgramVoice["AURA_ATHENA_EN"] = "aura-athena-en";
+    DeepgramVoice["AURA_HERA_EN"] = "aura-hera-en";
+    DeepgramVoice["AURA_ORION_EN"] = "aura-orion-en";
+    DeepgramVoice["AURA_ARCAS_EN"] = "aura-arcas-en";
+    DeepgramVoice["AURA_PERSEUS_EN"] = "aura-perseus-en";
+    DeepgramVoice["AURA_ANGUS_EN"] = "aura-angus-en";
+    DeepgramVoice["AURA_ORPHEUS_EN"] = "aura-orpheus-en";
+    DeepgramVoice["AURA_HELIOS_EN"] = "aura-helios-en";
+    DeepgramVoice["AURA_ZEUS_EN"] = "aura-zeus-en";
+})(DeepgramVoice || (exports.DeepgramVoice = DeepgramVoice = {}));
+const DeepgramVoiceDetails = [
+    {
+        name: DeepgramVoice.AURA_ASTERIA_EN,
+        displayName: "Asteria",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_LUNA_EN,
+        displayName: "Luna",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_STELLA_EN,
+        displayName: "Stella",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_ATHENA_EN,
+        displayName: "Athena",
+        languageCode: types_1.VoiceLanguage.EN_GB,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_HERA_EN,
+        displayName: "Hera",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.FEMALE
+    },
+    {
+        name: DeepgramVoice.AURA_ORION_EN,
+        displayName: "Orion",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ARCAS_EN,
+        displayName: "Arcas",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_PERSEUS_EN,
+        displayName: "Perseus",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ANGUS_EN,
+        displayName: "Angus",
+        languageCode: types_1.VoiceLanguage.EN_IE,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ORPHEUS_EN,
+        displayName: "Orpheus",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_HELIOS_EN,
+        displayName: "Helios",
+        languageCode: types_1.VoiceLanguage.EN_GB,
+        gender: types_1.VoiceGender.MALE
+    },
+    {
+        name: DeepgramVoice.AURA_ZEUS_EN,
+        displayName: "Zeus",
+        languageCode: types_1.VoiceLanguage.EN_US,
+        gender: types_1.VoiceGender.MALE
+    }
+];
+exports.DeepgramVoiceDetails = DeepgramVoiceDetails;

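The new DeepgramVoices module publishes both the voice enum and a details list (display name, language, gender). A small consumer sketch; the root import path assumes these symbols are re-exported through the tts index, as the index.js change below suggests:

import {
  DeepgramVoice,
  DeepgramVoiceDetails,
  VoiceGender,
  VoiceLanguage
} from "@fonoster/common";

// Every female US-English Aura voice in the published metadata.
const femaleUsVoices = DeepgramVoiceDetails
  .filter((v) => v.gender === VoiceGender.FEMALE && v.languageCode === VoiceLanguage.EN_US)
  .map((v) => v.displayName);

console.log(femaleUsVoices);                // ["Asteria", "Luna", "Stella", "Hera"]
console.log(DeepgramVoice.AURA_ASTERIA_EN); // "aura-asteria-en"
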
package/dist/tts/index.d.ts
CHANGED
package/dist/tts/index.js
CHANGED
@@ -34,4 +34,5 @@ Object.defineProperty(exports, "__esModule", { value: true });
  */
 __exportStar(require("./GoogleVoices"), exports);
 __exportStar(require("./AzureVoices"), exports);
+__exportStar(require("./DeepgramVoices"), exports);
 __exportStar(require("./types"), exports);

package/dist/voice/Dial.js
CHANGED
@@ -15,5 +15,6 @@ var DialStatus;
     DialStatus["BUSY"] = "BUSY";
     DialStatus["PROGRESS"] = "PROGRESS";
     DialStatus["NOANSWER"] = "NOANSWER";
+    // Maps from Asterisk's CHANUNAVAIL and CONGESTION
     DialStatus["FAILED"] = "FAILED";
 })(DialStatus || (exports.DialStatus = DialStatus = {}));

package/dist/voice/StreamGather.d.ts
ADDED

@@ -0,0 +1,15 @@
+import { VerbRequest, VerbResponse } from "./Verb";
+declare enum StreamGatherSource {
+    SPEECH = "speech",
+    DTMF = "dtmf",
+    SPEECH_AND_DTMF = "speech,dtmf"
+}
+type StreamGatherOptions = {
+    source?: StreamGatherSource;
+};
+type StartStreamGatherRequest = VerbRequest & StreamGatherOptions;
+type StreamGatherPayload = VerbResponse & {
+    speech?: string;
+    digit?: string;
+};
+export { StreamGatherSource, StreamGatherPayload, StartStreamGatherRequest, StreamGatherOptions };

package/dist/voice/StreamGather.js
ADDED

@@ -0,0 +1,9 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.StreamGatherSource = void 0;
+var StreamGatherSource;
+(function (StreamGatherSource) {
+    StreamGatherSource["SPEECH"] = "speech";
+    StreamGatherSource["DTMF"] = "dtmf";
+    StreamGatherSource["SPEECH_AND_DTMF"] = "speech,dtmf";
+})(StreamGatherSource || (exports.StreamGatherSource = StreamGatherSource = {}));

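On the TypeScript side the gather source is a string enum (note the combined "speech,dtmf" value), and the request/payload types are intersections over the generic verb types. A minimal usage sketch; the root import path and the sessionRef field on VerbRequest are assumptions, not shown in this diff:

import { StreamGatherSource, StartStreamGatherRequest } from "@fonoster/common";

// StartStreamGatherRequest = VerbRequest & StreamGatherOptions
const request: StartStreamGatherRequest = {
  sessionRef: "hypothetical-session-ref",    // assumed VerbRequest field
  source: StreamGatherSource.SPEECH_AND_DTMF // serialized as "speech,dtmf"
};
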
package/dist/voice/index.d.ts
CHANGED
package/dist/voice/index.js
CHANGED
package/dist/voice/voice.d.ts
CHANGED
@@ -6,11 +6,12 @@ import { PlayDtmfRequest } from "./PlayDtmf";
 import { RecordRequest, RecordResponse } from "./Record";
 import { SayRequest, SayResponse } from "./Say";
 import { StartStreamRequest, StartStreamResponse, StopStreamRequest, StreamPayload } from "./Stream";
+import { StartStreamGatherRequest, StreamGatherPayload } from "./StreamGather";
 import { VerbRequest, VerbResponse, VoiceRequest } from "./Verb";
 import { GrpcError } from "../GrpcError";
-declare const DATA
-declare const END
-declare const ERROR
+declare const DATA = "data";
+declare const END = "end";
+declare const ERROR = "error";
 declare enum StreamEvent {
     DATA = "data",
     END = "end",

@@ -42,12 +43,17 @@ declare enum StreamContent {
     START_STREAM_REQUEST = "startStreamRequest",
     START_STREAM_RESPONSE = "startStreamResponse",
     STOP_STREAM_REQUEST = "stopStreamRequest",
-    STREAM_PAYLOAD = "streamPayload"
+    STREAM_PAYLOAD = "streamPayload",
+    START_STREAM_GATHER_REQUEST = "startStreamGatherRequest",
+    START_STREAM_GATHER_RESPONSE = "startStreamGatherResponse",
+    STOP_STREAM_GATHER_REQUEST = "stopStreamGatherRequest",
+    STOP_STREAM_GATHER_RESPONSE = "stopStreamGatherResponse",
+    STREAM_GATHER_PAYLOAD = "streamGatherPayload"
 }
 type VoiceClientConfig = {
     appRef: string;
     accessKeyId: string;
-
+    endpoint: string;
     ingressNumber: string;
     callerName: string;
     callerNumber: string;

@@ -73,6 +79,9 @@ type VoiceIn = {
     };
     startStreamResponse?: StartStreamResponse;
     streamPayload?: StreamPayload;
+    streamGatherPayload?: StreamGatherPayload;
+    startStreamGatherResponse?: VerbResponse;
+    stopStreamGatherResponse?: VerbResponse;
 };
 type VoiceOut = {
     answerRequest?: VerbRequest;

@@ -89,10 +98,13 @@ type VoiceOut = {
     startStreamRequest?: StartStreamRequest;
     stopStreamRequest?: StopStreamRequest;
     streamPayload?: StreamPayload;
+    startStreamGatherRequest?: StartStreamGatherRequest;
+    stopStreamGatherRequest?: VerbRequest;
 };
 type BaseVoiceStream<T, W> = {
     removeListener: (e: StreamEvent, cb: (voice: T) => void) => void;
     on: (e: StreamEvent, cb: (voice: T) => void) => void;
+    once: (e: StreamEvent, cb: (voice: T) => void) => void;
     write: (voice: W) => void;
     end: () => void;
 };

package/dist/voice/voice.js
CHANGED
@@ -41,4 +41,9 @@ var StreamContent;
     StreamContent["START_STREAM_RESPONSE"] = "startStreamResponse";
     StreamContent["STOP_STREAM_REQUEST"] = "stopStreamRequest";
     StreamContent["STREAM_PAYLOAD"] = "streamPayload";
+    StreamContent["START_STREAM_GATHER_REQUEST"] = "startStreamGatherRequest";
+    StreamContent["START_STREAM_GATHER_RESPONSE"] = "startStreamGatherResponse";
+    StreamContent["STOP_STREAM_GATHER_REQUEST"] = "stopStreamGatherRequest";
+    StreamContent["STOP_STREAM_GATHER_RESPONSE"] = "stopStreamGatherResponse";
+    StreamContent["STREAM_GATHER_PAYLOAD"] = "streamGatherPayload";
 })(StreamContent || (exports.StreamContent = StreamContent = {}));

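Taken together, voice.d.ts and voice.js route the new gather messages through the existing stream plumbing: VoiceOut carries the requests, VoiceIn carries the responses and payloads, and BaseVoiceStream gains a once listener. A simplified consumer sketch using local stand-ins for the declared types (not the package's own exports):

// Simplified local mirrors of the declarations above, for illustration only.
type VoiceIn = { streamGatherPayload?: { speech?: string; digit?: string } };
type VoiceOut = { startStreamGatherRequest?: { source?: string } };
type VoiceStream = {
  on: (e: "data", cb: (voice: VoiceIn) => void) => void;
  once: (e: "data", cb: (voice: VoiceIn) => void) => void; // new in 0.7.0
  write: (voice: VoiceOut) => void;
};

// Start a gather, then react to each streamGatherPayload that arrives.
function gatherSpeechAndDigits(stream: VoiceStream) {
  stream.write({ startStreamGatherRequest: { source: "speech,dtmf" } });
  stream.on("data", (voice) => {
    const payload = voice.streamGatherPayload;
    if (payload?.speech) console.log("speech:", payload.speech);
    if (payload?.digit) console.log("digit:", payload.digit);
  });
}
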
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fonoster/common",
-  "version": "0.
+  "version": "0.7.0",
   "description": "Common library for Fonoster projects",
   "author": "Pedro Sanders <psanders@fonoster.com>",
   "homepage": "https://github.com/fonoster/fonoster#readme",

@@ -18,7 +18,7 @@
     "clean": "rimraf ./dist node_modules tsconfig.tsbuildinfo"
   },
   "dependencies": {
-    "@fonoster/logger": "^0.
+    "@fonoster/logger": "^0.7.0",
     "@grpc/grpc-js": "~1.10.6",
     "@grpc/proto-loader": "^0.7.12",
     "grpc-health-check": "^2.0.2",

@@ -42,5 +42,5 @@
   "devDependencies": {
     "@types/nodemailer": "^6.4.14"
   },
-  "gitHead": "
+  "gitHead": "afa950ace3b1d022b6dc8be0c3b87a6b8a5ba3c5"
 }