@tensamin/audio 0.2.6 → 0.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -87,12 +87,4 @@ output: {
87
87
  maxGainDb?: number; // default: 6.0
88
88
  smoothTransitions?: boolean;// default: true
89
89
  }
90
- ```
91
-
92
- ### LiveKit mute handling
93
-
94
- ```ts
95
- muteWhenSilent?: boolean; // default: false
96
- ````
97
-
98
- When `muteWhenSilent` is `true`, the library automatically calls `track.mute()` when silence is detected and `track.unmute()` when speech resumes (only if it muted the track itself).
90
+ ```
@@ -1,9 +1,9 @@
1
1
  import {
2
2
  createAudioPipeline
3
- } from "./chunk-7DOV5ZIH.mjs";
3
+ } from "./chunk-W7DJJ6RY.mjs";
4
4
  import {
5
5
  createRemoteAudioMonitor
6
- } from "./chunk-47YTMONE.mjs";
6
+ } from "./chunk-SIXRS6UJ.mjs";
7
7
 
8
8
  // src/livekit/integration.ts
9
9
  async function attachSpeakingDetectionToTrack(track, options = {}) {
@@ -21,16 +21,30 @@ async function attachSpeakingDetectionToTrack(track, options = {}) {
21
21
  const listeners = /* @__PURE__ */ new Set();
22
22
  let mutedByController = false;
23
23
  let currentState = pipeline.state;
24
+ let gainNode = null;
25
+ if (options.muteWhenSilent) {
26
+ try {
27
+ const audioContext = new AudioContext();
28
+ const source = audioContext.createMediaStreamSource(
29
+ new MediaStream([pipeline.processedTrack])
30
+ );
31
+ gainNode = audioContext.createGain();
32
+ source.connect(gainNode);
33
+ gainNode.connect(audioContext.destination);
34
+ } catch (error) {
35
+ console.error("Failed to create gain node for volume control", error);
36
+ }
37
+ }
24
38
  const speakingHandler = (state) => {
25
39
  currentState = state;
26
40
  listeners.forEach((listener) => listener(state));
27
- if (options.muteWhenSilent) {
28
- if (!state.speaking && !track.isMuted) {
29
- track.mute().catch((error) => console.error("mute failed", error));
41
+ if (options.muteWhenSilent && gainNode) {
42
+ if (!state.speaking && gainNode.gain.value > 0) {
43
+ gainNode.gain.value = 0;
30
44
  mutedByController = true;
31
45
  }
32
46
  if (state.speaking && mutedByController) {
33
- track.unmute().catch((error) => console.error("unmute failed", error));
47
+ gainNode.gain.value = 1;
34
48
  mutedByController = false;
35
49
  }
36
50
  }
@@ -62,9 +76,15 @@ async function attachSpeakingDetectionToTrack(track, options = {}) {
62
76
  pipeline.events.off("speakingChange", speakingHandler);
63
77
  pipeline.events.off("error", errorHandler);
64
78
  listeners.clear();
65
- if (mutedByController && !track.isMuted) {
66
- track.unmute().catch((error) => console.error("unmute failed", error));
67
- mutedByController = false;
79
+ if (gainNode) {
80
+ if (mutedByController && gainNode.gain.value === 0) {
81
+ gainNode.gain.value = 1;
82
+ mutedByController = false;
83
+ }
84
+ try {
85
+ gainNode.disconnect();
86
+ } catch (error) {
87
+ }
68
88
  }
69
89
  pipeline.dispose();
70
90
  if (originalTrack.readyState === "live") {
@@ -1,6 +1,6 @@
1
1
  import {
2
2
  DEFAULT_SPEAKING_DETECTION_CONFIG
3
- } from "./chunk-FYX5A3OM.mjs";
3
+ } from "./chunk-MMXH5JTD.mjs";
4
4
 
5
5
  // src/vad/vad-state.ts
6
6
  var LevelBasedVAD = class {
@@ -23,7 +23,7 @@ var DEFAULT_LIVEKIT_SPEAKING_OPTIONS = {
23
23
  noiseSuppression: DEFAULT_NOISE_SUPPRESSION_CONFIG,
24
24
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG,
25
25
  output: DEFAULT_OUTPUT_GAIN_CONFIG,
26
- muteWhenSilent: false
26
+ muteWhenSilent: true
27
27
  };
28
28
  var DEFAULT_REMOTE_SPEAKING_OPTIONS = {
29
29
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG
@@ -3,11 +3,11 @@ import {
3
3
  } from "./chunk-QNQK6QFB.mjs";
4
4
  import {
5
5
  LevelBasedVAD
6
- } from "./chunk-XHU4WPFD.mjs";
6
+ } from "./chunk-HNHNU5MU.mjs";
7
7
  import {
8
8
  DEFAULT_REMOTE_SPEAKING_OPTIONS,
9
9
  DEFAULT_SPEAKING_DETECTION_CONFIG
10
- } from "./chunk-FYX5A3OM.mjs";
10
+ } from "./chunk-MMXH5JTD.mjs";
11
11
  import {
12
12
  getAudioContext,
13
13
  registerPipeline,
@@ -6,13 +6,13 @@ import {
6
6
  } from "./chunk-QNQK6QFB.mjs";
7
7
  import {
8
8
  LevelBasedVAD
9
- } from "./chunk-XHU4WPFD.mjs";
9
+ } from "./chunk-HNHNU5MU.mjs";
10
10
  import {
11
11
  DEFAULT_LIVEKIT_SPEAKING_OPTIONS,
12
12
  DEFAULT_NOISE_SUPPRESSION_CONFIG,
13
13
  DEFAULT_OUTPUT_GAIN_CONFIG,
14
14
  DEFAULT_SPEAKING_DETECTION_CONFIG
15
- } from "./chunk-FYX5A3OM.mjs";
15
+ } from "./chunk-MMXH5JTD.mjs";
16
16
  import {
17
17
  getAudioContext,
18
18
  registerPipeline,
package/dist/defaults.js CHANGED
@@ -51,7 +51,7 @@ var DEFAULT_LIVEKIT_SPEAKING_OPTIONS = {
51
51
  noiseSuppression: DEFAULT_NOISE_SUPPRESSION_CONFIG,
52
52
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG,
53
53
  output: DEFAULT_OUTPUT_GAIN_CONFIG,
54
- muteWhenSilent: false
54
+ muteWhenSilent: true
55
55
  };
56
56
  var DEFAULT_REMOTE_SPEAKING_OPTIONS = {
57
57
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG
package/dist/defaults.mjs CHANGED
@@ -4,7 +4,7 @@ import {
4
4
  DEFAULT_OUTPUT_GAIN_CONFIG,
5
5
  DEFAULT_REMOTE_SPEAKING_OPTIONS,
6
6
  DEFAULT_SPEAKING_DETECTION_CONFIG
7
- } from "./chunk-FYX5A3OM.mjs";
7
+ } from "./chunk-MMXH5JTD.mjs";
8
8
  export {
9
9
  DEFAULT_LIVEKIT_SPEAKING_OPTIONS,
10
10
  DEFAULT_NOISE_SUPPRESSION_CONFIG,
package/dist/index.js CHANGED
@@ -65,7 +65,7 @@ var DEFAULT_LIVEKIT_SPEAKING_OPTIONS = {
65
65
  noiseSuppression: DEFAULT_NOISE_SUPPRESSION_CONFIG,
66
66
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG,
67
67
  output: DEFAULT_OUTPUT_GAIN_CONFIG,
68
- muteWhenSilent: false
68
+ muteWhenSilent: true
69
69
  };
70
70
  var DEFAULT_REMOTE_SPEAKING_OPTIONS = {
71
71
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG
@@ -503,16 +503,30 @@ async function attachSpeakingDetectionToTrack(track, options = {}) {
503
503
  const listeners = /* @__PURE__ */ new Set();
504
504
  let mutedByController = false;
505
505
  let currentState = pipeline.state;
506
+ let gainNode = null;
507
+ if (options.muteWhenSilent) {
508
+ try {
509
+ const audioContext = new AudioContext();
510
+ const source = audioContext.createMediaStreamSource(
511
+ new MediaStream([pipeline.processedTrack])
512
+ );
513
+ gainNode = audioContext.createGain();
514
+ source.connect(gainNode);
515
+ gainNode.connect(audioContext.destination);
516
+ } catch (error) {
517
+ console.error("Failed to create gain node for volume control", error);
518
+ }
519
+ }
506
520
  const speakingHandler = (state) => {
507
521
  currentState = state;
508
522
  listeners.forEach((listener) => listener(state));
509
- if (options.muteWhenSilent) {
510
- if (!state.speaking && !track.isMuted) {
511
- track.mute().catch((error) => console.error("mute failed", error));
523
+ if (options.muteWhenSilent && gainNode) {
524
+ if (!state.speaking && gainNode.gain.value > 0) {
525
+ gainNode.gain.value = 0;
512
526
  mutedByController = true;
513
527
  }
514
528
  if (state.speaking && mutedByController) {
515
- track.unmute().catch((error) => console.error("unmute failed", error));
529
+ gainNode.gain.value = 1;
516
530
  mutedByController = false;
517
531
  }
518
532
  }
@@ -544,9 +558,15 @@ async function attachSpeakingDetectionToTrack(track, options = {}) {
544
558
  pipeline.events.off("speakingChange", speakingHandler);
545
559
  pipeline.events.off("error", errorHandler);
546
560
  listeners.clear();
547
- if (mutedByController && !track.isMuted) {
548
- track.unmute().catch((error) => console.error("unmute failed", error));
549
- mutedByController = false;
561
+ if (gainNode) {
562
+ if (mutedByController && gainNode.gain.value === 0) {
563
+ gainNode.gain.value = 1;
564
+ mutedByController = false;
565
+ }
566
+ try {
567
+ gainNode.disconnect();
568
+ } catch (error) {
569
+ }
550
570
  }
551
571
  pipeline.dispose();
552
572
  if (originalTrack.readyState === "live") {
package/dist/index.mjs CHANGED
@@ -2,19 +2,19 @@ import "./chunk-WBQAMGXK.mjs";
2
2
  import {
3
3
  attachSpeakingDetectionToRemoteTrack,
4
4
  attachSpeakingDetectionToTrack
5
- } from "./chunk-Z36BDQQV.mjs";
6
- import "./chunk-7DOV5ZIH.mjs";
5
+ } from "./chunk-EFZHGDVK.mjs";
6
+ import "./chunk-W7DJJ6RY.mjs";
7
7
  import "./chunk-IS37FHDN.mjs";
8
- import "./chunk-47YTMONE.mjs";
8
+ import "./chunk-SIXRS6UJ.mjs";
9
9
  import "./chunk-QNQK6QFB.mjs";
10
- import "./chunk-XHU4WPFD.mjs";
10
+ import "./chunk-HNHNU5MU.mjs";
11
11
  import {
12
12
  DEFAULT_LIVEKIT_SPEAKING_OPTIONS,
13
13
  DEFAULT_NOISE_SUPPRESSION_CONFIG,
14
14
  DEFAULT_OUTPUT_GAIN_CONFIG,
15
15
  DEFAULT_REMOTE_SPEAKING_OPTIONS,
16
16
  DEFAULT_SPEAKING_DETECTION_CONFIG
17
- } from "./chunk-FYX5A3OM.mjs";
17
+ } from "./chunk-MMXH5JTD.mjs";
18
18
  import "./chunk-OZ7KMC4S.mjs";
19
19
  export {
20
20
  DEFAULT_LIVEKIT_SPEAKING_OPTIONS,
@@ -182,7 +182,7 @@ var DEFAULT_LIVEKIT_SPEAKING_OPTIONS = {
182
182
  noiseSuppression: DEFAULT_NOISE_SUPPRESSION_CONFIG,
183
183
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG,
184
184
  output: DEFAULT_OUTPUT_GAIN_CONFIG,
185
- muteWhenSilent: false
185
+ muteWhenSilent: true
186
186
  };
187
187
  var DEFAULT_REMOTE_SPEAKING_OPTIONS = {
188
188
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG
@@ -498,16 +498,30 @@ async function attachSpeakingDetectionToTrack(track, options = {}) {
498
498
  const listeners = /* @__PURE__ */ new Set();
499
499
  let mutedByController = false;
500
500
  let currentState = pipeline.state;
501
+ let gainNode = null;
502
+ if (options.muteWhenSilent) {
503
+ try {
504
+ const audioContext = new AudioContext();
505
+ const source = audioContext.createMediaStreamSource(
506
+ new MediaStream([pipeline.processedTrack])
507
+ );
508
+ gainNode = audioContext.createGain();
509
+ source.connect(gainNode);
510
+ gainNode.connect(audioContext.destination);
511
+ } catch (error) {
512
+ console.error("Failed to create gain node for volume control", error);
513
+ }
514
+ }
501
515
  const speakingHandler = (state) => {
502
516
  currentState = state;
503
517
  listeners.forEach((listener) => listener(state));
504
- if (options.muteWhenSilent) {
505
- if (!state.speaking && !track.isMuted) {
506
- track.mute().catch((error) => console.error("mute failed", error));
518
+ if (options.muteWhenSilent && gainNode) {
519
+ if (!state.speaking && gainNode.gain.value > 0) {
520
+ gainNode.gain.value = 0;
507
521
  mutedByController = true;
508
522
  }
509
523
  if (state.speaking && mutedByController) {
510
- track.unmute().catch((error) => console.error("unmute failed", error));
524
+ gainNode.gain.value = 1;
511
525
  mutedByController = false;
512
526
  }
513
527
  }
@@ -539,9 +553,15 @@ async function attachSpeakingDetectionToTrack(track, options = {}) {
539
553
  pipeline.events.off("speakingChange", speakingHandler);
540
554
  pipeline.events.off("error", errorHandler);
541
555
  listeners.clear();
542
- if (mutedByController && !track.isMuted) {
543
- track.unmute().catch((error) => console.error("unmute failed", error));
544
- mutedByController = false;
556
+ if (gainNode) {
557
+ if (mutedByController && gainNode.gain.value === 0) {
558
+ gainNode.gain.value = 1;
559
+ mutedByController = false;
560
+ }
561
+ try {
562
+ gainNode.disconnect();
563
+ } catch (error) {
564
+ }
545
565
  }
546
566
  pipeline.dispose();
547
567
  if (originalTrack.readyState === "live") {
@@ -1,13 +1,13 @@
1
1
  import {
2
2
  attachSpeakingDetectionToRemoteTrack,
3
3
  attachSpeakingDetectionToTrack
4
- } from "../chunk-Z36BDQQV.mjs";
5
- import "../chunk-7DOV5ZIH.mjs";
4
+ } from "../chunk-EFZHGDVK.mjs";
5
+ import "../chunk-W7DJJ6RY.mjs";
6
6
  import "../chunk-IS37FHDN.mjs";
7
- import "../chunk-47YTMONE.mjs";
7
+ import "../chunk-SIXRS6UJ.mjs";
8
8
  import "../chunk-QNQK6QFB.mjs";
9
- import "../chunk-XHU4WPFD.mjs";
10
- import "../chunk-FYX5A3OM.mjs";
9
+ import "../chunk-HNHNU5MU.mjs";
10
+ import "../chunk-MMXH5JTD.mjs";
11
11
  import "../chunk-OZ7KMC4S.mjs";
12
12
  export {
13
13
  attachSpeakingDetectionToRemoteTrack,
@@ -179,7 +179,7 @@ var DEFAULT_LIVEKIT_SPEAKING_OPTIONS = {
179
179
  noiseSuppression: DEFAULT_NOISE_SUPPRESSION_CONFIG,
180
180
  speaking: DEFAULT_SPEAKING_DETECTION_CONFIG,
181
181
  output: DEFAULT_OUTPUT_GAIN_CONFIG,
182
- muteWhenSilent: false
182
+ muteWhenSilent: true
183
183
  };
184
184
 
185
185
  // src/vad/vad-state.ts
@@ -1,10 +1,10 @@
1
1
  import {
2
2
  createAudioPipeline
3
- } from "../chunk-7DOV5ZIH.mjs";
3
+ } from "../chunk-W7DJJ6RY.mjs";
4
4
  import "../chunk-IS37FHDN.mjs";
5
5
  import "../chunk-QNQK6QFB.mjs";
6
- import "../chunk-XHU4WPFD.mjs";
7
- import "../chunk-FYX5A3OM.mjs";
6
+ import "../chunk-HNHNU5MU.mjs";
7
+ import "../chunk-MMXH5JTD.mjs";
8
8
  import "../chunk-OZ7KMC4S.mjs";
9
9
  export {
10
10
  createAudioPipeline
@@ -1,9 +1,9 @@
1
1
  import {
2
2
  createRemoteAudioMonitor
3
- } from "../chunk-47YTMONE.mjs";
3
+ } from "../chunk-SIXRS6UJ.mjs";
4
4
  import "../chunk-QNQK6QFB.mjs";
5
- import "../chunk-XHU4WPFD.mjs";
6
- import "../chunk-FYX5A3OM.mjs";
5
+ import "../chunk-HNHNU5MU.mjs";
6
+ import "../chunk-MMXH5JTD.mjs";
7
7
  import "../chunk-OZ7KMC4S.mjs";
8
8
  export {
9
9
  createRemoteAudioMonitor
@@ -1,7 +1,7 @@
1
1
  import {
2
2
  LevelBasedVAD
3
- } from "../chunk-XHU4WPFD.mjs";
4
- import "../chunk-FYX5A3OM.mjs";
3
+ } from "../chunk-HNHNU5MU.mjs";
4
+ import "../chunk-MMXH5JTD.mjs";
5
5
  export {
6
6
  LevelBasedVAD
7
7
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@tensamin/audio",
3
- "version": "0.2.6",
3
+ "version": "0.2.8",
4
4
  "author": {
5
5
  "email": "aloisianer@proton.me",
6
6
  "name": "Alois"
@@ -19,7 +19,7 @@
19
19
  "typescript": "^5.9.3"
20
20
  },
21
21
  "dependencies": {
22
- "deepfilternet3-noise-filter": "^1.1.2",
22
+ "deepfilternet3-noise-filter": "^1.1.4",
23
23
  "mitt": "^3.0.1"
24
24
  },
25
25
  "peerDependencies": {