@whereby.com/assistant-sdk 0.0.0-canary-20251002120040 → 0.0.0-canary-20251007140529
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +119 -1
- package/dist/index.cjs +123 -203
- package/dist/index.d.cts +61 -53
- package/dist/index.d.mts +61 -53
- package/dist/index.d.ts +61 -53
- package/dist/index.mjs +124 -184
- package/dist/legacy-esm.js +124 -184
- package/dist/tools.cjs +9 -17
- package/dist/tools.d.ts +1 -2
- package/package.json +2 -2
package/README.md
CHANGED
@@ -1 +1,119 @@
-#
+# `@whereby.com/assistant-sdk`
+
+The `@whereby.com/assistant-sdk` lets you run headless participants in **_Node.js_**. Assistants can join rooms, combine audio from all participants, send messages, and perform in-room actions such as starting recordings.
+It powers use cases like transcription, captions, or streaming to realtime AI agents.
+Use this SDK if you want to connect AI or backend services directly into Whereby rooms. For browser apps, start with the [Browser SDK](https://github.com/whereby/sdk/tree/main/packages/browser-sdk) or [React Native SDK](https://github.com/whereby/sdk/tree/main/packages/react-native-sdk).
+
+## Installation
+
+```shell
+npm install @whereby.com/assistant-sdk
+```
+
+or
+
+```shell
+yarn add @whereby.com/assistant-sdk
+```
+
+or
+
+```shell
+pnpm add @whereby.com/assistant-sdk
+```
+
+> [!IMPORTANT]
+> Assistants require [FFmpeg](https://ffmpeg.org/download.html) to be installed and available in your system `PATH` if you want to use combined audio streams.
+
+## Usage
+
+In order to use assistants, you must first create an assistant in your Whereby dashboard. This will give you an API key which you can then pass into your Assistant to allow it to join rooms. See here for more details - **_INSERT DOCS WHEN READY_**
+
+### Getting started
+
+```typescript
+import "@whereby.com/assistant-sdk/polyfills"; // Required to run in Node.js
+import { Assistant, AUDIO_STREAM_READY } from "@whereby.com/assistant-sdk";
+
+async function main() {
+    // Create an assistant instance
+    const assistant = new Assistant({
+        roomUrl: "https://your-subdomain.whereby.com/your-room", // Room URL to join
+        startCombinedAudioStream: true, // Enable combined audio stream
+    });
+
+    // Listen for the audio stream to be ready
+    assistant.on(AUDIO_STREAM_READY, (stream) => {
+        console.log("Combined audio stream is ready:", stream);
+        // You can now pipe this stream to your transcription service or other processing
+    });
+
+    // Start the assistant
+    await assistant.start();
+    console.log("Assistant started and joined the room");
+}
+
+main();
+```
+
+## Core Concepts
+
+### Combined Audio
+
+Assistants can output a single `MediaStream` that mixes all participant audio. Ideal for transcription, audio-only recording, or passing to realtime AI services. **_FFmpeg is required for this feature._**
+
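The README shows how to obtain the combined stream but not how to consume it. Below is a minimal sketch, assuming the `AUDIO_STREAM_READY` handler has handed you the combined WebRTC `MediaStream` (as in the Getting started example) and using the nonstandard `RTCAudioSink` from `@roamhq/wrtc`, the same dependency this package's dist code builds its mixer on; the `ondata` payload shape comes from that nonstandard API and is an assumption here.

```typescript
import wrtc from "@roamhq/wrtc";

// Sketch: forward the mixed room audio as 16-bit PCM frames to any consumer
// (transcription, recording, a realtime AI agent). `stream` is the combined
// MediaStream delivered via AUDIO_STREAM_READY in the example above.
export function pipeCombinedAudio(
    stream: { getAudioTracks(): any[] },
    onPcm: (samples: Int16Array, sampleRate: number) => void,
) {
    const [track] = stream.getAudioTracks();
    const sink = new wrtc.nonstandard.RTCAudioSink(track);
    // ondata fires roughly every 10 ms with { samples, sampleRate, ... } (assumed shape).
    sink.ondata = ({ samples, sampleRate }: any) => onPcm(samples, sampleRate);
    return () => sink.stop(); // detach the sink when finished
}
```

Pulling PCM at the sink level keeps the SDK side untouched; swap the `onPcm` callback for a write into your transcription client's audio socket.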
+### In-room Actions
+
+Assistants can perform common room operations (sketched below):
+
+- Send and receive chat messages
+- Start and stop cloud recordings
+- Spotlight participants
+- Request mic/camera changes
+
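A rough sketch of driving such actions, built only from room-connection methods that appear in this package's dist output (`getRoomConnection`, `subscribeToChatMessages`, `subscribeToRemoteParticipants`, `askToSpeak`, `muteParticipants`); the callback payload shapes and the participant `id` field are assumptions, and the recording and spotlight calls are not visible in this diff, so check the API reference for those.

```typescript
import { Assistant } from "@whereby.com/assistant-sdk";

// Sketch: drive in-room actions through the assistant's room connection.
// Method names below appear in this package's dist code; payload shapes and
// the participant `id` field are assumptions - verify against the API reference.
export function moderateRoom(assistant: Assistant) {
    const room: any = assistant.getRoomConnection();

    // Receive chat messages.
    room.subscribeToChatMessages((messages: unknown) => {
        console.log("chat:", messages);
    });

    // Let the first remote participant speak and mute the rest.
    room.subscribeToRemoteParticipants((participants: Array<{ id: string }>) => {
        const [speaker, ...others] = participants;
        if (speaker) room.askToSpeak(speaker.id);
        if (others.length) room.muteParticipants(others.map((p) => p.id));
    });
}
```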
+### Trigger API
+
+The Trigger API allows you to listen for specific webhooks and create your assistant when those webhooks are received. This is useful for creating assistants on-demand, for example when a meeting starts. See the [Trigger API docs](https://docs.whereby.com/reference/assistant-sdk-reference/api-reference/trigger) for more details.
+
+The Trigger API:
+
+- Runs a lightweight server to listen for webhooks
+- You define `webhookTriggers` - functions that decide whether to start an assistant based on the webhook payload.
+- When the trigger condition is met, a `TRIGGER_EVENT_SUCCESS` event is emitted with the webhook payload, and you can create your assistant.
+
+Typical usage:
+
+```typescript
+import "@whereby.com/assistant-sdk/polyfills"; // Required to run in Node.js
+import { Assistant, Trigger, TRIGGER_EVENT_SUCCESS, AUDIO_STREAM_READY } from "@whereby.com/assistant-sdk";
+
+const trigger = new Trigger({
+    webhookTriggers: {
+        "room.client.joined": () => true, // Start an assistant when the first client joins
+    },
+    port: 3000, // Port to listen on
+});
+
+trigger.on(TRIGGER_EVENT_SUCCESS, async ({ roomUrl }) => {
+    // Create and start your assistant when the trigger condition is met
+    const assistant = new Assistant({
+        roomUrl,
+        startCombinedAudioStream: true,
+        assistantKey: "your-assistant-key",
+    });
+
+    await assistant.start();
+
+    assistant.on(AUDIO_STREAM_READY, (stream) => {
+        console.log("Combined audio stream is ready:", stream);
+    });
+});
+
+trigger.start();
+```
+
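The `webhookTriggers` predicate in the example above always returns `true`. In practice it receives the webhook payload and can filter on it before an assistant is spun up; a sketch follows, assuming Whereby's webhook payload exposes the room name under `data.roomName` (an assumption — verify the exact fields in the webhook docs).

```typescript
import "@whereby.com/assistant-sdk/polyfills";
import { Trigger, TRIGGER_EVENT_SUCCESS } from "@whereby.com/assistant-sdk";

const trigger = new Trigger({
    port: 3000,
    webhookTriggers: {
        // Only react to joins in rooms whose name starts with /standup-.
        // The payload shape (data.roomName) is an assumption about Whereby webhooks.
        "room.client.joined": (payload: any) => Boolean(payload?.data?.roomName?.startsWith("/standup-")),
    },
});

trigger.on(TRIGGER_EVENT_SUCCESS, ({ roomUrl }) => {
    console.log("Trigger matched; an assistant could be started for", roomUrl);
});

trigger.start();
```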
+## Learn more
+
+- Assistant SDK API reference - [API Reference](https://docs.whereby.com/reference/assistant-sdk-reference)
+- Trigger API docs - [API Reference](https://docs.whereby.com/reference/assistant-sdk-reference/api-reference/trigger)
+- [Assistant example app](https://github.com/whereby/whereby-assistant-audio-recorder)
package/dist/index.cjs
CHANGED
@@ -4,35 +4,12 @@ var core = require('@whereby.com/core');
 var wrtc = require('@roamhq/wrtc');
 var EventEmitter = require('events');
 var child_process = require('child_process');
-var stream = require('stream');
 var express = require('express');
 var assert = require('assert');
 var bodyParser = require('body-parser');
-var os = require('os');
-var dotenv = require('dotenv');
-
-function _interopNamespaceDefault(e) {
-    var n = Object.create(null);
-    if (e) {
-        Object.keys(e).forEach(function (k) {
-            if (k !== 'default') {
-                var d = Object.getOwnPropertyDescriptor(e, k);
-                Object.defineProperty(n, k, d.get ? d : {
-                    enumerable: true,
-                    get: function () { return e[k]; }
-                });
-            }
-        });
-    }
-    n.default = e;
-    return Object.freeze(n);
-}
-
-var dotenv__namespace = /*#__PURE__*/_interopNamespaceDefault(dotenv);
 
 const TRIGGER_EVENT_SUCCESS = "trigger_event_success";
 
-const AUDIO_STREAM_READY = "AUDIO_STREAM_READY";
 const ASSISTANT_JOINED_ROOM = "ASSISTANT_JOINED_ROOM";
 const ASSISTANT_LEFT_ROOM = "ASSISTANT_LEFT_ROOM";
 const PARTICIPANT_VIDEO_TRACK_ADDED = "PARTICIPANT_VIDEO_TRACK_ADDED";
@@ -40,48 +17,10 @@ const PARTICIPANT_VIDEO_TRACK_REMOVED = "PARTICIPANT_VIDEO_TRACK_REMOVED";
 const PARTICIPANT_AUDIO_TRACK_ADDED = "PARTICIPANT_AUDIO_TRACK_ADDED";
 const PARTICIPANT_AUDIO_TRACK_REMOVED = "PARTICIPANT_AUDIO_TRACK_REMOVED";
 
-
-
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-***************************************************************************** */
-/* global Reflect, Promise, SuppressedError, Symbol, Iterator */
-
-
-function __awaiter(thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-}
-
-typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
-    var e = new Error(message);
-    return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
-};
-
-const { nonstandard: { RTCAudioSink }, } = wrtc;
-class AudioSource extends stream.PassThrough {
-    constructor() {
-        super({
-            allowHalfOpen: true,
-            highWaterMark: 1 * 1024,
-        });
-    }
+const { nonstandard: { RTCAudioSink, RTCAudioSource }, } = wrtc;
+class AudioSource extends RTCAudioSource {
 }
-class AudioSink extends
+class AudioSink extends RTCAudioSink {
     constructor(track) {
         super(track);
         this._sink = new RTCAudioSink(track);
@@ -189,7 +128,7 @@ function createFfmpegMixer() {
      * @param ff Child process handle from spawn("ffmpeg", ...)
      * @param slotCount Number of participant input slots (0..N-1 → fd 3..3+N-1)
      */
-    function startPacer(ff, slotCount, rtcAudioSource
+    function startPacer(ff, slotCount, rtcAudioSource) {
         if (stopPacerFn) {
             stopPacerFn();
             stopPacerFn = null;
@@ -207,8 +146,6 @@ function createFfmpegMixer() {
             frameQueue: [],
             nextDueMs: t0 + outputFrameMs,
             rtcAudioSource,
-            onAudioStreamReady,
-            didEmitReadyEvent: false,
         };
         const iv = setInterval(() => {
             const t = nowMs();
@@ -236,10 +173,6 @@ function createFfmpegMixer() {
             const state = outputPacerState;
             if (t >= state.nextDueMs) {
                 const samples = state.frameQueue.length > 0 ? state.frameQueue.shift() : new Int16Array(FRAME_10MS_SAMPLES); // silence
-                if (!state.didEmitReadyEvent) {
-                    state.onAudioStreamReady();
-                    state.didEmitReadyEvent = true;
-                }
                 state.rtcAudioSource.onData({
                     samples: samples,
                     sampleRate: STREAM_INPUT_SAMPLE_RATE_IN_HZ,
@@ -342,11 +275,11 @@ function createFfmpegMixer() {
      * The process will log its output to stderr.
      * @return The spawned FFmpeg process.
      */
-    function spawnFFmpegProcessDebug(rtcAudioSource
+    function spawnFFmpegProcessDebug(rtcAudioSource) {
         const stdio = ["ignore", "ignore", "pipe", ...Array(PARTICIPANT_SLOTS).fill("pipe")];
         const args = getFFmpegArgumentsDebug();
         const ffmpegProcess = child_process.spawn("ffmpeg", args, { stdio });
-        startPacer(ffmpegProcess, PARTICIPANT_SLOTS, rtcAudioSource
+        startPacer(ffmpegProcess, PARTICIPANT_SLOTS, rtcAudioSource);
         ffmpegProcess.stderr.setEncoding("utf8");
         ffmpegProcess.stderr.on("data", (d) => console.error("[ffmpeg]", String(d).trim()));
         ffmpegProcess.on("error", () => console.error("FFmpeg process error (debug): is ffmpeg installed?"));
@@ -360,11 +293,11 @@ function createFfmpegMixer() {
      * @param rtcAudioSource The RTCAudioSource to which the mixed audio will be sent.
      * @return The spawned FFmpeg process.
      */
-    function spawnFFmpegProcess(rtcAudioSource
+    function spawnFFmpegProcess(rtcAudioSource) {
         const stdio = ["pipe", "pipe", "pipe", ...Array(PARTICIPANT_SLOTS).fill("pipe")];
         const args = getFFmpegArguments();
         const ffmpegProcess = child_process.spawn("ffmpeg", args, { stdio });
-        startPacer(ffmpegProcess, PARTICIPANT_SLOTS, rtcAudioSource
+        startPacer(ffmpegProcess, PARTICIPANT_SLOTS, rtcAudioSource);
         ffmpegProcess.stderr.setEncoding("utf8");
         ffmpegProcess.stderr.on("data", (d) => console.error("[ffmpeg]", String(d).trim()));
         ffmpegProcess.on("error", () => console.error("FFmpeg process error: is ffmpeg installed?"));
@@ -462,7 +395,7 @@ function createFfmpegMixer() {
 }
 
 class AudioMixer extends EventEmitter.EventEmitter {
-    constructor(
+    constructor() {
         super();
         this.ffmpegProcess = null;
         this.combinedAudioStream = null;
@@ -472,7 +405,6 @@ class AudioMixer extends EventEmitter.EventEmitter {
         this.mixer = createFfmpegMixer();
         this.setupMediaStream();
         this.participantSlots = new Map(Array.from({ length: PARTICIPANT_SLOTS }, (_, i) => [i, ""]));
-        this.onStreamReady = onStreamReady;
     }
     setupMediaStream() {
         this.rtcAudioSource = new wrtc.nonstandard.RTCAudioSource();
@@ -488,7 +420,7 @@ class AudioMixer extends EventEmitter.EventEmitter {
             return;
         }
         if (!this.ffmpegProcess && this.rtcAudioSource) {
-            this.ffmpegProcess = this.mixer.spawnFFmpegProcess(this.rtcAudioSource
+            this.ffmpegProcess = this.mixer.spawnFFmpegProcess(this.rtcAudioSource);
         }
         for (const p of participants)
             this.attachParticipantIfNeeded(p);
@@ -578,12 +510,28 @@ class AudioMixer extends EventEmitter.EventEmitter {
     }
 }
 
+const { nonstandard: { RTCVideoSink, RTCVideoSource }, } = wrtc;
+class VideoSource extends RTCVideoSource {
+}
+class VideoSink extends RTCVideoSink {
+    constructor(track) {
+        super(track);
+        this._sink = new RTCVideoSink(track);
+    }
+    subscribe(cb) {
+        this._sink.onframe = cb;
+        return () => {
+            this._sink.onframe = undefined;
+        };
+    }
+}
+
 class Assistant extends EventEmitter {
-    constructor({ assistantKey
+    constructor({ assistantKey }) {
         super();
-        this.
-        this.
-        this.
+        this.localAudioSource = null;
+        this.localVideoSource = null;
+        this.combinedAudioSink = null;
         this.remoteMediaTracks = {};
         this.roomUrl = null;
         this.stateSubscriptions = [];
@@ -604,28 +552,34 @@ class Assistant extends EventEmitter {
             const tracks = stream.getTracks();
             tracks.forEach((track) => {
                 if (!this.remoteMediaTracks[track.id]) {
-
-
-
-
-
-
+                    if (track.kind === "video") {
+                        this.emit(PARTICIPANT_VIDEO_TRACK_ADDED, {
+                            participantId,
+                            trackId: track.id,
+                            data: new VideoSink(track),
+                        });
+                    }
+                    else {
+                        this.emit(PARTICIPANT_AUDIO_TRACK_ADDED, {
+                            participantId,
+                            trackId: track.id,
+                            data: new AudioSink(track),
+                        });
+                    }
                     this.remoteMediaTracks[track.id] = {
                         participantId,
-                        stream,
                         track,
                     };
                 }
             });
             return tracks;
         });
-        Object.values(this.remoteMediaTracks).forEach(({ participantId,
+        Object.values(this.remoteMediaTracks).forEach(({ participantId, track }) => {
            if (!currentRemoteMediaTracks.includes(track)) {
                const eventName = track.kind === "video" ? PARTICIPANT_VIDEO_TRACK_REMOVED : PARTICIPANT_AUDIO_TRACK_REMOVED;
                this.emit(eventName, {
                    participantId,
-
-                    track,
+                    trackId: track.id,
                });
                delete this.remoteMediaTracks[track.id];
            }
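The payload added in this hunk for `PARTICIPANT_VIDEO_TRACK_ADDED` carries a `VideoSink` whose `subscribe(cb)` (defined in the previous hunk) wires the callback to the underlying `RTCVideoSink.onframe`. A consumer sketch; it assumes the event constant is exported like the other event names and that frames follow `@roamhq/wrtc`'s nonstandard `{ frame: { width, height, data } }` shape.

```typescript
import { Assistant, PARTICIPANT_VIDEO_TRACK_ADDED } from "@whereby.com/assistant-sdk";

// Sketch: log incoming video frames per participant via the VideoSink payload.
export function logVideoFrames(assistant: Assistant) {
    const stops: Array<() => void> = [];
    assistant.on(PARTICIPANT_VIDEO_TRACK_ADDED, ({ participantId, trackId, data }: any) => {
        // data.subscribe() returns a function that clears the onframe handler.
        stops.push(
            data.subscribe(({ frame }: any) => {
                console.log(`frame from ${participantId} (${trackId}): ${frame.width}x${frame.height}`);
            }),
        );
    });
    return () => stops.forEach((stop) => stop());
}
```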
@@ -635,135 +589,102 @@ class Assistant extends EventEmitter {
         this.client = new core.WherebyClient();
         this.roomConnection = this.client.getRoomConnection();
         this.localMedia = this.client.getLocalMedia();
-        if (startLocalMedia) {
-            const outputAudioSource = new wrtc.nonstandard.RTCAudioSource();
-            const outputMediaStream = new wrtc.MediaStream([outputAudioSource.createTrack()]);
-            this.mediaStream = outputMediaStream;
-            this.audioSource = outputAudioSource;
-        }
-        if (startCombinedAudioStream) {
-            const handleStreamReady = () => {
-                if (!this.combinedStream) {
-                    console.warn("Combined stream is not available");
-                    return;
-                }
-                this.emit(AUDIO_STREAM_READY, {
-                    stream: this.combinedStream,
-                    track: this.combinedStream.getAudioTracks()[0],
-                });
-            };
-            const audioMixer = new AudioMixer(handleStreamReady);
-            this.combinedStream = audioMixer.getCombinedAudioStream();
-            this.stateSubscriptions.push(this.roomConnection.subscribeToRemoteParticipants(audioMixer.handleRemoteParticipants.bind(audioMixer)));
-        }
         this.stateSubscriptions.push(this.roomConnection.subscribeToConnectionStatus(this.handleConnectionStatusChange));
         this.stateSubscriptions.push(this.roomConnection.subscribeToRemoteParticipants(this.handleRemoteParticipantsTracksChange));
     }
     joinRoom(roomUrl) {
-
-
-
-
-
-
-
-
-
-            },
-            roomUrl,
-            isNodeSdk: true,
-            assistantKey: this.assistantKey,
-        });
-        return this.roomConnection.joinRoom();
+        this.roomUrl = roomUrl;
+        this.roomConnection.initialize({
+            localMediaOptions: {
+                audio: false,
+                video: false,
+            },
+            roomUrl,
+            isNodeSdk: true,
+            assistantKey: this.assistantKey,
         });
-
-    startLocalMedia() {
-        if (!this.mediaStream) {
-            const outputAudioSource = new wrtc.nonstandard.RTCAudioSource();
-            const outputMediaStream = new wrtc.MediaStream([outputAudioSource.createTrack()]);
-            this.mediaStream = outputMediaStream;
-            this.audioSource = outputAudioSource;
-        }
-        this.localMedia.startMedia(this.mediaStream);
-    }
-    getLocalMediaStream() {
-        return this.mediaStream;
-    }
-    getLocalAudioSource() {
-        return this.audioSource;
+        return this.roomConnection.joinRoom();
     }
     getRoomConnection() {
         return this.roomConnection;
     }
-
-
-
-
-
-
-
-
-
-
-        this.
-
-
-
+    startLocalMedia() {
+        if (Boolean(this.localAudioSource) || Boolean(this.localVideoSource)) {
+            return;
+        }
+        this.localAudioSource = new AudioSource();
+        this.localVideoSource = new VideoSource();
+        const outputMediaStream = new wrtc.MediaStream([
+            this.localAudioSource.createTrack(),
+            this.localVideoSource.createTrack(),
+        ]);
+        this.localMedia.startMedia(outputMediaStream);
+        this.localMedia.toggleMicrophone(true);
+    }
+    stopLocalMedia() {
+        this.localMedia.stopMedia();
+        this.localAudioSource = null;
+        this.localVideoSource = null;
     }
-
-        this.
+    getLocalAudioSource() {
+        return this.localAudioSource;
     }
-
-        this.
+    getLocalVideoSource() {
+        return this.localVideoSource;
     }
-
-
-            this.roomConnection.askToSpeak(participantId);
-        }
-        else {
-            this.roomConnection.muteParticipants([participantId]);
-        }
+    getLocalMedia() {
+        return this.localMedia;
     }
-
-        if (
-            this.
+    getCombinedAudioSink() {
+        if (this.combinedAudioSink) {
+            return this.combinedAudioSink;
         }
-
-
+        const audioMixer = new AudioMixer();
+        const stream = audioMixer.getCombinedAudioStream();
+        const audioTracks = stream === null || stream === void 0 ? void 0 : stream.getAudioTracks();
+        if (audioTracks === null || audioTracks === void 0 ? void 0 : audioTracks.length) {
+            this.combinedAudioSink = new AudioSink(audioTracks[0]);
+            this.stateSubscriptions.push(this.roomConnection.subscribeToRemoteParticipants(audioMixer.handleRemoteParticipants.bind(audioMixer)));
+            return this.combinedAudioSink;
         }
-
-    acceptWaitingParticipant(participantId) {
-        this.roomConnection.acceptWaitingParticipant(participantId);
-    }
-    rejectWaitingParticipant(participantId) {
-        this.roomConnection.rejectWaitingParticipant(participantId);
-    }
-    subscribeToRemoteParticipants(callback) {
-        return this.roomConnection.subscribeToRemoteParticipants(callback);
-    }
-    subscribeToChatMessages(callback) {
-        return this.roomConnection.subscribeToChatMessages(callback);
+        return null;
     }
 }
 
-
-
+/******************************************************************************
+Copyright (c) Microsoft Corporation.
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+***************************************************************************** */
+/* global Reflect, Promise, SuppressedError, Symbol, Iterator */
+
+
+function __awaiter(thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+}
+
+typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+    var e = new Error(message);
+    return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+};
+
 function buildRoomUrl(roomPath, wherebySubdomain, baseDomain = "whereby.com") {
-
-    if (IS_LOCAL === "true") {
-        const ifaceAddrs = os.networkInterfaces()[BIND_INTERFACE];
-        if (!ifaceAddrs) {
-            throw new Error(`Unknown interface ${BIND_INTERFACE}`);
-        }
-        const [bindAddr] = ifaceAddrs.filter((iface) => iface.family === "IPv4");
-        if (!bindAddr) {
-            throw new Error(`No IPv4 address found for interface ${BIND_INTERFACE}`);
-        }
-        wherebyDomain = `${wherebySubdomain}-ip-${bindAddr.address.replace(/[.]/g, "-")}.hereby.dev:4443`;
-    }
-    else {
-        wherebyDomain = `${wherebySubdomain}.${baseDomain}`;
-    }
+    const wherebyDomain = `${wherebySubdomain}.${baseDomain}`;
     return `https://${wherebyDomain}${roomPath}`;
 }
 
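This hunk removes the `AUDIO_STREAM_READY` path and replaces it with a lazy `getCombinedAudioSink()` that wraps the first mixed audio track in an `AudioSink` (which now extends `RTCAudioSink`). A consumption sketch using only names introduced here; the `ondata` frame shape is `@roamhq/wrtc`'s nonstandard PCM payload and is an assumption, as is how much of this surface the published type declarations expose (hence the loose typing).

```typescript
import { Assistant } from "@whereby.com/assistant-sdk";

// Sketch: read mixed-room PCM through the sink returned by getCombinedAudioSink().
// Call this after the assistant has joined, so the mixer has participants to attach.
export function tapCombinedAudio(assistant: Assistant, onPcm: (samples: Int16Array, sampleRate: number) => void) {
    const sink: any = (assistant as any).getCombinedAudioSink();
    if (!sink) {
        console.warn("Combined audio sink not available yet (no audio tracks?)");
        return () => {};
    }
    // AudioSink extends the nonstandard RTCAudioSink, whose ondata callback
    // delivers ~10 ms Int16 PCM frames: { samples, sampleRate, ... } (assumed shape).
    sink.ondata = ({ samples, sampleRate }: any) => onPcm(samples, sampleRate);
    return () => sink.stop();
}
```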
@@ -824,7 +745,6 @@ class Trigger extends EventEmitter.EventEmitter {
 
 exports.ASSISTANT_JOINED_ROOM = ASSISTANT_JOINED_ROOM;
 exports.ASSISTANT_LEFT_ROOM = ASSISTANT_LEFT_ROOM;
-exports.AUDIO_STREAM_READY = AUDIO_STREAM_READY;
 exports.Assistant = Assistant;
 exports.AudioSink = AudioSink;
 exports.AudioSource = AudioSource;