@hivegpt/hiveai-angular 0.0.581 → 0.0.583

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/bundles/hivegpt-hiveai-angular.umd.js +420 -490
  2. package/bundles/hivegpt-hiveai-angular.umd.js.map +1 -1
  3. package/bundles/hivegpt-hiveai-angular.umd.min.js +1 -1
  4. package/bundles/hivegpt-hiveai-angular.umd.min.js.map +1 -1
  5. package/esm2015/hivegpt-hiveai-angular.js +4 -5
  6. package/esm2015/lib/components/voice-agent/services/audio-analyzer.service.js +3 -3
  7. package/esm2015/lib/components/voice-agent/services/voice-agent.service.js +195 -83
  8. package/esm2015/lib/components/voice-agent/services/websocket-voice-client.service.js +160 -49
  9. package/esm2015/lib/components/voice-agent/voice-agent.module.js +3 -5
  10. package/fesm2015/hivegpt-hiveai-angular.js +338 -416
  11. package/fesm2015/hivegpt-hiveai-angular.js.map +1 -1
  12. package/hivegpt-hiveai-angular.d.ts +3 -4
  13. package/hivegpt-hiveai-angular.d.ts.map +1 -1
  14. package/hivegpt-hiveai-angular.metadata.json +1 -1
  15. package/lib/components/voice-agent/services/audio-analyzer.service.d.ts +2 -2
  16. package/lib/components/voice-agent/services/voice-agent.service.d.ts +22 -13
  17. package/lib/components/voice-agent/services/voice-agent.service.d.ts.map +1 -1
  18. package/lib/components/voice-agent/services/websocket-voice-client.service.d.ts +30 -20
  19. package/lib/components/voice-agent/services/websocket-voice-client.service.d.ts.map +1 -1
  20. package/lib/components/voice-agent/voice-agent.module.d.ts +1 -1
  21. package/lib/components/voice-agent/voice-agent.module.d.ts.map +1 -1
  22. package/package.json +1 -1
  23. package/esm2015/lib/components/voice-agent/services/daily-voice-client.service.js +0 -305
  24. package/lib/components/voice-agent/services/daily-voice-client.service.d.ts +0 -62
  25. package/lib/components/voice-agent/services/daily-voice-client.service.d.ts.map +0 -1
@@ -5,15 +5,14 @@ import { HttpClient, HttpHeaders } from '@angular/common/http';
5
5
  import * as i0 from '@angular/core';
6
6
  import { Injectable, InjectionToken, Inject, PLATFORM_ID, Optional, NgZone, EventEmitter, Component, Injector, Output, Input, ElementRef, ChangeDetectionStrategy, ChangeDetectorRef, Renderer2, ViewContainerRef, ViewChild, ViewChildren, NgModule, Pipe } from '@angular/core';
7
7
  import { DomSanitizer } from '@angular/platform-browser';
8
- import { BehaviorSubject, of, throwError, Subject, Subscription, combineLatest } from 'rxjs';
9
- import { switchMap, catchError, filter, take, map, takeUntil, tap } from 'rxjs/operators';
8
+ import { BehaviorSubject, of, throwError, Subject, Subscription, merge, concat, timer, combineLatest } from 'rxjs';
9
+ import { switchMap, catchError, filter, take, map, takeUntil, distinctUntilChanged, startWith, tap } from 'rxjs/operators';
10
10
  import { isPlatformBrowser, CommonModule, DOCUMENT } from '@angular/common';
11
11
  import { Socket } from 'ngx-socket-io';
12
12
  import { Validators, FormBuilder, FormsModule, ReactiveFormsModule } from '@angular/forms';
13
13
  import * as SpeechSDK from 'microsoft-cognitiveservices-speech-sdk';
14
14
  import * as marked from 'marked';
15
15
  import { __awaiter } from 'tslib';
16
- import Daily from '@daily-co/daily-js';
17
16
  import { MatIconModule } from '@angular/material/icon';
18
17
  import { MatSidenavModule } from '@angular/material/sidenav';
19
18
  import { QuillModule } from 'ngx-quill';
@@ -684,8 +683,8 @@ BotsService.ctorParameters = () => [
684
683
  ];
685
684
 
686
685
  /**
687
- * Audio analyzer for waveform visualization only.
688
- * Do NOT use isUserSpeaking$ for call state; speaking state must come from Daily.js.
686
+ * Audio analyzer for waveform visualization and local (mic) speaking detection.
687
+ * VoiceAgentService may combine this with WebSocket server events for call state.
689
688
  */
690
689
  class AudioAnalyzerService {
691
690
  constructor() {
@@ -806,70 +805,77 @@ AudioAnalyzerService.decorators = [
806
805
  ];
807
806
 
808
807
  /**
809
- * WebSocket-only client for voice agent signaling.
808
+ * Native WebSocket client for voice session (signaling, transcripts, speaking hints).
810
809
  * CRITICAL: Uses native WebSocket only. NO Socket.IO, NO ngx-socket-io.
811
810
  *
812
- * Responsibilities:
813
- * - Connect to ws_url (from POST /ai/ask-voice response)
814
- * - Parse JSON messages (room_created, user_transcript, bot_transcript)
815
- * - Emit roomCreated$, userTranscript$, botTranscript$
816
- * - NO audio logic, NO mic logic. Audio is handled by Daily.js (WebRTC).
811
+ * Connects to `ws_url` from `POST {baseUrl}/ai/ask-voice-socket`.
812
+ * Parses JSON messages for transcripts and optional assistant/user speaking flags.
817
813
  */
818
814
  class WebSocketVoiceClientService {
819
- constructor() {
815
+ constructor(ngZone) {
816
+ this.ngZone = ngZone;
820
817
  this.ws = null;
821
- this.roomCreatedSubject = new Subject();
818
+ /** True when {@link disconnect} initiated the close (not counted as remote close). */
819
+ this.closeInitiatedByClient = false;
820
+ this.openedSubject = new Subject();
821
+ this.remoteCloseSubject = new Subject();
822
822
  this.userTranscriptSubject = new Subject();
823
823
  this.botTranscriptSubject = new Subject();
824
- /** Emits room_url when backend sends room_created. */
825
- this.roomCreated$ = this.roomCreatedSubject.asObservable();
826
- /** Emits user transcript updates. */
824
+ this.assistantSpeakingSubject = new Subject();
825
+ this.serverUserSpeakingSubject = new Subject();
826
+ this.audioChunkSubject = new Subject();
827
+ /** Fires once each time the WebSocket reaches OPEN. */
828
+ this.opened$ = this.openedSubject.asObservable();
829
+ /** Fires when the socket closes without a client-initiated {@link disconnect}. */
830
+ this.remoteClose$ = this.remoteCloseSubject.asObservable();
827
831
  this.userTranscript$ = this.userTranscriptSubject.asObservable();
828
- /** Emits bot transcript updates. */
829
832
  this.botTranscript$ = this.botTranscriptSubject.asObservable();
833
+ /** Assistant/bot speaking, when the server sends explicit events (see {@link handleJsonMessage}). */
834
+ this.assistantSpeaking$ = this.assistantSpeakingSubject.asObservable();
835
+ /** User speaking from server-side VAD, if provided. */
836
+ this.serverUserSpeaking$ = this.serverUserSpeakingSubject.asObservable();
837
+ /** Binary audio frames from server (when backend streams bot audio over WS). */
838
+ this.audioChunk$ = this.audioChunkSubject.asObservable();
830
839
  }
831
- /** Connect to signaling WebSocket. No audio over this connection. */
832
840
  connect(wsUrl) {
833
841
  var _a;
834
842
  if (((_a = this.ws) === null || _a === void 0 ? void 0 : _a.readyState) === WebSocket.OPEN) {
835
843
  return;
836
844
  }
837
845
  if (this.ws) {
846
+ this.closeInitiatedByClient = true;
838
847
  this.ws.close();
839
- this.ws = null;
840
848
  }
841
849
  try {
842
- this.ws = new WebSocket(wsUrl);
843
- this.ws.onmessage = (event) => {
844
- var _a;
845
- try {
846
- const msg = JSON.parse(event.data);
847
- if ((msg === null || msg === void 0 ? void 0 : msg.type) === 'room_created') {
848
- const roomUrl = ((_a = msg.room_url) !== null && _a !== void 0 ? _a : msg.roomUrl);
849
- if (typeof roomUrl === 'string') {
850
- this.roomCreatedSubject.next(roomUrl);
851
- }
852
- }
853
- else if ((msg === null || msg === void 0 ? void 0 : msg.type) === 'user_transcript' && typeof msg.text === 'string') {
854
- this.userTranscriptSubject.next({
855
- text: msg.text,
856
- final: msg.final === true,
857
- });
858
- }
859
- else if ((msg === null || msg === void 0 ? void 0 : msg.type) === 'bot_transcript' && typeof msg.text === 'string') {
860
- this.botTranscriptSubject.next(msg.text);
850
+ const socket = new WebSocket(wsUrl);
851
+ this.ws = socket;
852
+ socket.onopen = () => {
853
+ if (this.ws !== socket)
854
+ return;
855
+ this.ngZone.run(() => this.openedSubject.next());
856
+ };
857
+ socket.onmessage = (event) => {
858
+ if (this.ws !== socket)
859
+ return;
860
+ void this.handleIncomingMessage(event.data);
861
+ };
862
+ socket.onerror = () => {
863
+ this.ngZone.run(() => {
864
+ if (this.ws === socket && socket.readyState !== WebSocket.CLOSED) {
865
+ socket.close();
861
866
  }
867
+ });
868
+ };
869
+ socket.onclose = () => {
870
+ if (this.ws === socket) {
871
+ this.ws = null;
862
872
  }
863
- catch (_b) {
864
- // Ignore non-JSON or unknown messages
873
+ const client = this.closeInitiatedByClient;
874
+ this.closeInitiatedByClient = false;
875
+ if (!client) {
876
+ this.ngZone.run(() => this.remoteCloseSubject.next());
865
877
  }
866
878
  };
867
- this.ws.onerror = () => {
868
- this.disconnect();
869
- };
870
- this.ws.onclose = () => {
871
- this.ws = null;
872
- };
873
879
  }
874
880
  catch (err) {
875
881
  console.error('WebSocketVoiceClient: connect failed', err);
@@ -877,344 +883,140 @@ class WebSocketVoiceClientService {
877
883
  throw err;
878
884
  }
879
885
  }
880
- /** Disconnect and cleanup. */
881
- disconnect() {
882
- if (this.ws) {
883
- this.ws.close();
884
- this.ws = null;
885
- }
886
- }
887
- /** Whether the WebSocket is open. */
888
- get isConnected() {
889
- var _a;
890
- return ((_a = this.ws) === null || _a === void 0 ? void 0 : _a.readyState) === WebSocket.OPEN;
891
- }
892
- }
893
- WebSocketVoiceClientService.ɵprov = i0.ɵɵdefineInjectable({ factory: function WebSocketVoiceClientService_Factory() { return new WebSocketVoiceClientService(); }, token: WebSocketVoiceClientService, providedIn: "root" });
894
- WebSocketVoiceClientService.decorators = [
895
- { type: Injectable, args: [{
896
- providedIn: 'root',
897
- },] }
898
- ];
899
-
900
- /**
901
- * Daily.js WebRTC client for voice agent audio.
902
- * Responsibilities:
903
- * - Create and manage Daily CallObject
904
- * - Join Daily room using room_url
905
- * - Handle mic capture + speaker playback
906
- * - Bot speaking detection via AnalyserNode on remote track (instant)
907
- * - User speaking detection via active-speaker-change
908
- * - Expose speaking$ (bot speaking), userSpeaking$ (user speaking), micMuted$
909
- * - Expose localStream$ for waveform visualization (AudioAnalyzerService)
910
- */
911
- class DailyVoiceClientService {
912
- constructor(ngZone) {
913
- this.ngZone = ngZone;
914
- this.callObject = null;
915
- this.localStream = null;
916
- this.localSessionId = null;
917
- /** Explicit playback of remote (bot) audio; required in some browsers. */
918
- this.remoteAudioElement = null;
919
- /** AnalyserNode-based remote audio monitor for instant bot speaking detection. */
920
- this.remoteAudioContext = null;
921
- this.remoteSpeakingRAF = null;
922
- this.speakingSubject = new BehaviorSubject(false);
923
- this.userSpeakingSubject = new BehaviorSubject(false);
924
- this.micMutedSubject = new BehaviorSubject(false);
925
- this.localStreamSubject = new BehaviorSubject(null);
926
- /** True when bot (remote participant) is the active speaker. */
927
- this.speaking$ = this.speakingSubject.asObservable();
928
- /** True when user (local participant) is the active speaker. */
929
- this.userSpeaking$ = this.userSpeakingSubject.asObservable();
930
- /** True when mic is muted. */
931
- this.micMuted$ = this.micMutedSubject.asObservable();
932
- /** Emits local mic stream for waveform visualization. */
933
- this.localStream$ = this.localStreamSubject.asObservable();
934
- }
935
- /**
936
- * Connect to Daily room. Acquires mic first for waveform, then joins with audio.
937
- * @param roomUrl Daily room URL (from room_created)
938
- * @param token Optional meeting token
939
- */
940
- connect(roomUrl, token) {
886
+ handleIncomingMessage(payload) {
941
887
  return __awaiter(this, void 0, void 0, function* () {
942
- if (this.callObject) {
943
- yield this.disconnect();
888
+ if (typeof payload === 'string') {
889
+ this.handleJsonString(payload);
890
+ return;
944
891
  }
945
- try {
946
- // Get mic stream for both Daily and waveform (single capture)
947
- const stream = yield navigator.mediaDevices.getUserMedia({ audio: true });
948
- const audioTrack = stream.getAudioTracks()[0];
949
- if (!audioTrack) {
950
- stream.getTracks().forEach((t) => t.stop());
951
- throw new Error('No audio track');
952
- }
953
- this.localStream = stream;
954
- this.localStreamSubject.next(stream);
955
- // Create audio-only call object
956
- // videoSource: false = no camera, audioSource = our mic track
957
- const callObject = Daily.createCallObject({
958
- videoSource: false,
959
- audioSource: audioTrack,
960
- });
961
- this.callObject = callObject;
962
- this.setupEventHandlers(callObject);
963
- // Join room; Daily handles playback of remote (bot) audio automatically.
964
- // Only pass token when it's a non-empty string (Daily rejects undefined/non-string).
965
- const joinOptions = { url: roomUrl };
966
- if (typeof token === 'string' && token.trim() !== '') {
967
- joinOptions.token = token;
968
- }
969
- yield callObject.join(joinOptions);
970
- console.log(`[VoiceDebug] Room connected (Daily join complete) — ${new Date().toISOString()}`);
971
- const participants = callObject.participants();
972
- if (participants === null || participants === void 0 ? void 0 : participants.local) {
973
- this.localSessionId = participants.local.session_id;
974
- }
975
- // Initial mute state: Daily starts with audio on
976
- this.micMutedSubject.next(!callObject.localAudio());
892
+ if (payload instanceof ArrayBuffer) {
893
+ this.handleBinaryMessage(payload);
894
+ return;
977
895
  }
978
- catch (err) {
979
- this.cleanup();
980
- throw err;
896
+ if (payload instanceof Blob) {
897
+ const ab = yield payload.arrayBuffer();
898
+ this.handleBinaryMessage(ab);
981
899
  }
982
900
  });
983
901
  }
984
- setupEventHandlers(call) {
985
- // active-speaker-change: used ONLY for user speaking detection.
986
- // Bot speaking is detected by our own AnalyserNode (instant, no debounce).
987
- call.on('active-speaker-change', (event) => {
988
- this.ngZone.run(() => {
989
- var _a;
990
- const peerId = (_a = event === null || event === void 0 ? void 0 : event.activeSpeaker) === null || _a === void 0 ? void 0 : _a.peerId;
991
- if (!peerId || !this.localSessionId) {
992
- this.userSpeakingSubject.next(false);
993
- return;
994
- }
995
- const isLocal = peerId === this.localSessionId;
996
- this.userSpeakingSubject.next(isLocal);
997
- });
998
- });
999
- // track-started / track-stopped: set up remote audio playback + AnalyserNode monitor.
1000
- call.on('track-started', (event) => {
1001
- this.ngZone.run(() => {
1002
- var _a, _b, _c, _d;
1003
- const p = event === null || event === void 0 ? void 0 : event.participant;
1004
- const type = (_a = event === null || event === void 0 ? void 0 : event.type) !== null && _a !== void 0 ? _a : (_b = event === null || event === void 0 ? void 0 : event.track) === null || _b === void 0 ? void 0 : _b.kind;
1005
- const track = event === null || event === void 0 ? void 0 : event.track;
1006
- if (p && !p.local && type === 'audio') {
1007
- console.log(`[VoiceDebug] Got audio track from backend (track-started) — readyState=${track === null || track === void 0 ? void 0 : track.readyState}, muted=${track === null || track === void 0 ? void 0 : track.muted} — ${new Date().toISOString()}`);
1008
- const audioTrack = track !== null && track !== void 0 ? track : (_d = (_c = p.tracks) === null || _c === void 0 ? void 0 : _c.audio) === null || _d === void 0 ? void 0 : _d.track;
1009
- if (audioTrack && typeof audioTrack === 'object') {
1010
- this.playRemoteTrack(audioTrack);
1011
- this.monitorRemoteAudio(audioTrack);
1012
- }
1013
- }
1014
- });
1015
- });
1016
- call.on('track-stopped', (event) => {
1017
- this.ngZone.run(() => {
1018
- var _a, _b;
1019
- const p = event === null || event === void 0 ? void 0 : event.participant;
1020
- const type = (_a = event === null || event === void 0 ? void 0 : event.type) !== null && _a !== void 0 ? _a : (_b = event === null || event === void 0 ? void 0 : event.track) === null || _b === void 0 ? void 0 : _b.kind;
1021
- if (p && !p.local && type === 'audio') {
1022
- this.stopRemoteAudioMonitor();
1023
- this.stopRemoteAudio();
1024
- }
1025
- });
1026
- });
1027
- call.on('left-meeting', () => {
1028
- this.ngZone.run(() => this.cleanup());
1029
- });
1030
- call.on('error', (event) => {
1031
- this.ngZone.run(() => {
1032
- var _a;
1033
- console.error('DailyVoiceClient: Daily error', (_a = event === null || event === void 0 ? void 0 : event.errorMsg) !== null && _a !== void 0 ? _a : event);
1034
- this.cleanup();
1035
- });
1036
- });
1037
- }
1038
- /**
1039
- * Play remote (bot) audio track via a dedicated audio element.
1040
- * Required in many browsers where Daily's internal playback does not output to speakers.
1041
- */
1042
- playRemoteTrack(track) {
1043
- this.stopRemoteAudio();
902
+ handleJsonString(jsonText) {
1044
903
  try {
1045
- console.log(`[VoiceDebug] playRemoteTrack called — track.readyState=${track.readyState}, track.muted=${track.muted} — ${new Date().toISOString()}`);
1046
- track.onunmute = () => {
1047
- console.log(`[VoiceDebug] Remote audio track UNMUTED (audio data arriving) — ${new Date().toISOString()}`);
1048
- };
1049
- const stream = new MediaStream([track]);
1050
- const audio = new Audio();
1051
- audio.autoplay = true;
1052
- audio.srcObject = stream;
1053
- this.remoteAudioElement = audio;
1054
- audio.onplaying = () => {
1055
- console.log(`[VoiceDebug] Audio element PLAYING (browser started playback) — ${new Date().toISOString()}`);
1056
- };
1057
- let firstTimeUpdate = true;
1058
- audio.ontimeupdate = () => {
1059
- if (firstTimeUpdate) {
1060
- firstTimeUpdate = false;
1061
- console.log(`[VoiceDebug] Audio element first TIMEUPDATE (actual audio output) — ${new Date().toISOString()}`);
1062
- }
1063
- };
1064
- const p = audio.play();
1065
- if (p && typeof p.then === 'function') {
1066
- p.then(() => {
1067
- console.log(`[VoiceDebug] audio.play() resolved — ${new Date().toISOString()}`);
1068
- }).catch((err) => {
1069
- console.warn('DailyVoiceClient: remote audio play failed (may need user gesture)', err);
1070
- });
1071
- }
904
+ const msg = JSON.parse(jsonText);
905
+ this.ngZone.run(() => this.handleJsonMessage(msg));
1072
906
  }
1073
- catch (err) {
1074
- console.warn('DailyVoiceClient: failed to create remote audio element', err);
907
+ catch (_a) {
908
+ // Ignore non-JSON
1075
909
  }
1076
910
  }
1077
- /**
1078
- * Monitor remote audio track energy via AnalyserNode.
1079
- * Polls at ~60fps and flips speakingSubject based on actual audio energy.
1080
- */
1081
- monitorRemoteAudio(track) {
1082
- this.stopRemoteAudioMonitor();
911
+ handleBinaryMessage(buffer) {
912
+ // Some backends wrap JSON events inside binary WS frames.
913
+ const maybeText = this.tryDecodeUtf8(buffer);
914
+ if (maybeText !== null) {
915
+ this.handleJsonString(maybeText);
916
+ return;
917
+ }
918
+ // Otherwise treat binary as streamed assistant audio.
919
+ this.ngZone.run(() => this.audioChunkSubject.next(buffer));
920
+ }
921
+ tryDecodeUtf8(buffer) {
1083
922
  try {
1084
- const ctx = new AudioContext();
1085
- const source = ctx.createMediaStreamSource(new MediaStream([track]));
1086
- const analyser = ctx.createAnalyser();
1087
- analyser.fftSize = 256;
1088
- source.connect(analyser);
1089
- this.remoteAudioContext = ctx;
1090
- const dataArray = new Uint8Array(analyser.frequencyBinCount);
1091
- const THRESHOLD = 5;
1092
- const SILENCE_MS = 1500;
1093
- let lastSoundTime = 0;
1094
- let isSpeaking = false;
1095
- const poll = () => {
1096
- if (!this.remoteAudioContext)
1097
- return;
1098
- analyser.getByteFrequencyData(dataArray);
1099
- let sum = 0;
1100
- for (let i = 0; i < dataArray.length; i++) {
1101
- sum += dataArray[i];
1102
- }
1103
- const avg = sum / dataArray.length;
1104
- const now = Date.now();
1105
- if (avg > THRESHOLD) {
1106
- lastSoundTime = now;
1107
- if (!isSpeaking) {
1108
- isSpeaking = true;
1109
- console.log(`[VoiceDebug] Bot audio energy detected (speaking=true) — avg=${avg.toFixed(1)} — ${new Date().toISOString()}`);
1110
- this.ngZone.run(() => {
1111
- this.userSpeakingSubject.next(false);
1112
- this.speakingSubject.next(true);
1113
- });
1114
- }
1115
- }
1116
- else if (isSpeaking && now - lastSoundTime > SILENCE_MS) {
1117
- isSpeaking = false;
1118
- console.log(`[VoiceDebug] Bot audio silence detected (speaking=false) — ${new Date().toISOString()}`);
1119
- this.ngZone.run(() => this.speakingSubject.next(false));
1120
- }
1121
- this.remoteSpeakingRAF = requestAnimationFrame(poll);
1122
- };
1123
- this.remoteSpeakingRAF = requestAnimationFrame(poll);
923
+ const text = new TextDecoder('utf-8', { fatal: true }).decode(buffer);
924
+ const trimmed = text.trim();
925
+ if (!trimmed || (trimmed[0] !== '{' && trimmed[0] !== '[')) {
926
+ return null;
927
+ }
928
+ return trimmed;
1124
929
  }
1125
- catch (err) {
1126
- console.warn('DailyVoiceClient: failed to create remote audio monitor', err);
930
+ catch (_a) {
931
+ return null;
1127
932
  }
1128
933
  }
1129
- stopRemoteAudioMonitor() {
1130
- if (this.remoteSpeakingRAF) {
1131
- cancelAnimationFrame(this.remoteSpeakingRAF);
1132
- this.remoteSpeakingRAF = null;
934
+ handleJsonMessage(msg) {
935
+ const type = msg.type;
936
+ const typeStr = typeof type === 'string' ? type : '';
937
+ if (typeStr === 'session_ready' || typeStr === 'connected' || typeStr === 'voice_session_started') {
938
+ return;
1133
939
  }
1134
- if (this.remoteAudioContext) {
1135
- this.remoteAudioContext.close().catch(() => { });
1136
- this.remoteAudioContext = null;
940
+ if (typeStr === 'assistant_speaking' ||
941
+ typeStr === 'bot_speaking') {
942
+ if (msg.active === true || msg.speaking === true) {
943
+ this.assistantSpeakingSubject.next(true);
944
+ }
945
+ else if (msg.active === false || msg.speaking === false) {
946
+ this.assistantSpeakingSubject.next(false);
947
+ }
948
+ return;
1137
949
  }
1138
- }
1139
- stopRemoteAudio() {
1140
- if (this.remoteAudioElement) {
1141
- try {
1142
- this.remoteAudioElement.pause();
1143
- this.remoteAudioElement.srcObject = null;
950
+ if (typeStr === 'user_speaking') {
951
+ if (msg.active === true || msg.speaking === true) {
952
+ this.serverUserSpeakingSubject.next(true);
953
+ }
954
+ else if (msg.active === false || msg.speaking === false) {
955
+ this.serverUserSpeakingSubject.next(false);
1144
956
  }
1145
- catch (_) { }
1146
- this.remoteAudioElement = null;
957
+ return;
1147
958
  }
1148
- }
1149
- /** Set mic muted state. */
1150
- setMuted(muted) {
1151
- if (!this.callObject)
959
+ if (typeStr === 'input_audio_buffer.speech_started') {
960
+ this.serverUserSpeakingSubject.next(true);
961
+ return;
962
+ }
963
+ if (typeStr === 'input_audio_buffer.speech_stopped') {
964
+ this.serverUserSpeakingSubject.next(false);
965
+ return;
966
+ }
967
+ if (typeStr === 'response.audio.delta') {
968
+ this.assistantSpeakingSubject.next(true);
969
+ return;
970
+ }
971
+ if (typeStr === 'response.audio.done' ||
972
+ typeStr === 'response.output_audio.done') {
973
+ this.assistantSpeakingSubject.next(false);
974
+ return;
975
+ }
976
+ if (typeStr === 'user_transcript' && typeof msg.text === 'string') {
977
+ this.userTranscriptSubject.next({
978
+ text: msg.text,
979
+ final: msg.final === true,
980
+ });
1152
981
  return;
1153
- this.callObject.setLocalAudio(!muted);
1154
- this.micMutedSubject.next(muted);
982
+ }
983
+ if (typeStr === 'bot_transcript' && typeof msg.text === 'string') {
984
+ this.botTranscriptSubject.next(msg.text);
985
+ }
1155
986
  }
1156
- /** Disconnect and cleanup. */
1157
987
  disconnect() {
1158
- return __awaiter(this, void 0, void 0, function* () {
1159
- if (!this.callObject) {
1160
- this.cleanup();
1161
- return;
1162
- }
1163
- try {
1164
- yield this.callObject.leave();
1165
- }
1166
- catch (e) {
1167
- // ignore
1168
- }
1169
- this.cleanup();
1170
- });
1171
- }
1172
- cleanup() {
1173
- this.stopRemoteAudioMonitor();
1174
- this.stopRemoteAudio();
1175
- if (this.callObject) {
1176
- this.callObject.destroy().catch(() => { });
1177
- this.callObject = null;
1178
- }
1179
- if (this.localStream) {
1180
- this.localStream.getTracks().forEach((t) => t.stop());
1181
- this.localStream = null;
988
+ if (!this.ws) {
989
+ return;
1182
990
  }
1183
- this.localSessionId = null;
1184
- this.speakingSubject.next(false);
1185
- this.userSpeakingSubject.next(false);
1186
- this.localStreamSubject.next(null);
1187
- // Keep last micMuted state; will reset on next connect
991
+ this.closeInitiatedByClient = true;
992
+ this.ws.close();
993
+ }
994
+ get isConnected() {
995
+ var _a;
996
+ return ((_a = this.ws) === null || _a === void 0 ? void 0 : _a.readyState) === WebSocket.OPEN;
1188
997
  }
1189
998
  }
1190
- DailyVoiceClientService.ɵprov = i0.ɵɵdefineInjectable({ factory: function DailyVoiceClientService_Factory() { return new DailyVoiceClientService(i0.ɵɵinject(i0.NgZone)); }, token: DailyVoiceClientService, providedIn: "root" });
1191
- DailyVoiceClientService.decorators = [
999
+ WebSocketVoiceClientService.ɵprov = i0.ɵɵdefineInjectable({ factory: function WebSocketVoiceClientService_Factory() { return new WebSocketVoiceClientService(i0.ɵɵinject(i0.NgZone)); }, token: WebSocketVoiceClientService, providedIn: "root" });
1000
+ WebSocketVoiceClientService.decorators = [
1192
1001
  { type: Injectable, args: [{
1193
1002
  providedIn: 'root',
1194
1003
  },] }
1195
1004
  ];
1196
- DailyVoiceClientService.ctorParameters = () => [
1005
+ WebSocketVoiceClientService.ctorParameters = () => [
1197
1006
  { type: NgZone }
1198
1007
  ];
1199
1008
 
1200
1009
  /**
1201
- * Voice agent orchestrator. Coordinates WebSocket (signaling) and Daily.js (WebRTC audio).
1202
- *
1203
- * CRITICAL: This service must NEVER use Socket.IO or ngx-socket-io. Voice flow uses only:
1204
- * - Native WebSocket (WebSocketVoiceClientService) for signaling (room_created, transcripts)
1205
- * - Daily.js (DailyVoiceClientService) for WebRTC audio. Audio does NOT flow over WebSocket.
1206
- *
1207
- * - Maintains callState, statusText, duration, isMicMuted, isUserSpeaking, audioLevels
1208
- * - Uses WebSocket for room_created and transcripts only (no audio)
1209
- * - Uses Daily.js for all audio, mic, and real-time speaking detection
1010
+ * Voice agent orchestrator: single WebSocket (`ws_url` from POST /ai/ask-voice-socket)
1011
+ * for session events, transcripts, and optional speaking hints; local mic for capture
1012
+ * and waveform only (no Daily/WebRTC room).
1210
1013
  */
1211
1014
  class VoiceAgentService {
1212
- constructor(audioAnalyzer, wsClient, dailyClient, platformTokenRefresh,
1015
+ constructor(audioAnalyzer, wsClient, platformTokenRefresh,
1213
1016
  /** `Object` not `object` — ngc metadata collection rejects the `object` type in DI params. */
1214
1017
  platformId) {
1215
1018
  this.audioAnalyzer = audioAnalyzer;
1216
1019
  this.wsClient = wsClient;
1217
- this.dailyClient = dailyClient;
1218
1020
  this.platformTokenRefresh = platformTokenRefresh;
1219
1021
  this.platformId = platformId;
1220
1022
  this.callStateSubject = new BehaviorSubject('idle');
@@ -1227,6 +1029,11 @@ class VoiceAgentService {
1227
1029
  this.botTranscriptSubject = new Subject();
1228
1030
  this.callStartTime = 0;
1229
1031
  this.durationInterval = null;
1032
+ this.localMicStream = null;
1033
+ this.remoteAudioContext = null;
1034
+ this.pendingRemoteAudio = [];
1035
+ this.remoteAudioPlaying = false;
1036
+ this.endCall$ = new Subject();
1230
1037
  this.subscriptions = new Subscription();
1231
1038
  this.destroy$ = new Subject();
1232
1039
  this.callState$ = this.callStateSubject.asObservable();
@@ -1237,8 +1044,13 @@ class VoiceAgentService {
1237
1044
  this.audioLevels$ = this.audioLevelsSubject.asObservable();
1238
1045
  this.userTranscript$ = this.userTranscriptSubject.asObservable();
1239
1046
  this.botTranscript$ = this.botTranscriptSubject.asObservable();
1240
- // Waveform visualization only - do NOT use for speaking state
1241
1047
  this.subscriptions.add(this.audioAnalyzer.audioLevels$.subscribe((levels) => this.audioLevelsSubject.next(levels)));
1048
+ this.subscriptions.add(this.wsClient.remoteClose$
1049
+ .pipe(takeUntil(this.destroy$))
1050
+ .subscribe(() => void this.handleRemoteClose()));
1051
+ this.subscriptions.add(this.wsClient.audioChunk$
1052
+ .pipe(takeUntil(this.destroy$))
1053
+ .subscribe((chunk) => this.enqueueRemoteAudio(chunk)));
1242
1054
  }
1243
1055
  ngOnDestroy() {
1244
1056
  this.destroy$.next();
@@ -1249,11 +1061,13 @@ class VoiceAgentService {
1249
1061
  resetToIdle() {
1250
1062
  if (this.callStateSubject.value === 'idle')
1251
1063
  return;
1064
+ this.endCall$.next();
1252
1065
  this.stopDurationTimer();
1066
+ this.callStartTime = 0;
1253
1067
  this.audioAnalyzer.stop();
1068
+ this.stopLocalMic();
1069
+ this.resetRemoteAudioPlayback();
1254
1070
  this.wsClient.disconnect();
1255
- // Fire-and-forget: Daily disconnect is async; connect() will await if needed
1256
- void this.dailyClient.disconnect();
1257
1071
  this.callStateSubject.next('idle');
1258
1072
  this.statusTextSubject.next('');
1259
1073
  this.durationSubject.next('0:00');
@@ -1268,9 +1082,6 @@ class VoiceAgentService {
1268
1082
  this.callStateSubject.next('connecting');
1269
1083
  this.statusTextSubject.next('Connecting...');
1270
1084
  let accessToken = token;
1271
- // Align with chat drawer token handling: always delegate to
1272
- // PlatformTokenRefreshService when we have a usersApiUrl, so it can
1273
- // fall back to stored tokens even if the caller passed an empty token.
1274
1085
  if (usersApiUrl && isPlatformBrowser(this.platformId)) {
1275
1086
  try {
1276
1087
  const ensured = yield this.platformTokenRefresh
@@ -1286,7 +1097,7 @@ class VoiceAgentService {
1286
1097
  }
1287
1098
  }
1288
1099
  const baseUrl = apiUrl.replace(/\/$/, '');
1289
- const postUrl = `${baseUrl}/ai/ask-voice`;
1100
+ const postUrl = `${baseUrl}/ai/ask-voice-socket`;
1290
1101
  const headers = {
1291
1102
  'Content-Type': 'application/json',
1292
1103
  Authorization: `Bearer ${accessToken}`,
@@ -1298,7 +1109,6 @@ class VoiceAgentService {
1298
1109
  eventToken,
1299
1110
  'ngrok-skip-browser-warning': 'true',
1300
1111
  };
1301
- // POST to get ws_url for signaling
1302
1112
  const res = yield fetch(postUrl, {
1303
1113
  method: 'POST',
1304
1114
  headers,
@@ -1312,33 +1122,21 @@ class VoiceAgentService {
1312
1122
  throw new Error(`HTTP ${res.status}`);
1313
1123
  }
1314
1124
  const json = yield res.json();
1315
- const wsUrl = json === null || json === void 0 ? void 0 : json.rn_ws_url;
1316
- if (!wsUrl || typeof wsUrl !== 'string') {
1125
+ const wsUrl = (typeof (json === null || json === void 0 ? void 0 : json.ws_url) === 'string' && json.ws_url) ||
1126
+ (typeof (json === null || json === void 0 ? void 0 : json.rn_ws_url) === 'string' && json.rn_ws_url);
1127
+ if (!wsUrl) {
1317
1128
  throw new Error('No ws_url in response');
1318
1129
  }
1319
- // Subscribe to room_created BEFORE connecting to avoid race
1320
- this.wsClient.roomCreated$
1321
- .pipe(take(1), takeUntil(this.destroy$))
1322
- .subscribe((roomUrl) => __awaiter(this, void 0, void 0, function* () {
1323
- try {
1324
- yield this.onRoomCreated(roomUrl);
1325
- }
1326
- catch (err) {
1327
- console.error('Daily join failed:', err);
1328
- this.callStateSubject.next('ended');
1329
- this.statusTextSubject.next('Connection failed');
1330
- yield this.disconnect();
1331
- throw err;
1332
- }
1333
- }));
1334
- // Forward transcripts from WebSocket
1130
+ const untilCallEnds$ = merge(this.destroy$, this.endCall$);
1335
1131
  this.subscriptions.add(this.wsClient.userTranscript$
1336
- .pipe(takeUntil(this.destroy$))
1132
+ .pipe(takeUntil(untilCallEnds$))
1337
1133
  .subscribe((t) => this.userTranscriptSubject.next(t)));
1338
1134
  this.subscriptions.add(this.wsClient.botTranscript$
1339
- .pipe(takeUntil(this.destroy$))
1135
+ .pipe(takeUntil(untilCallEnds$))
1340
1136
  .subscribe((t) => this.botTranscriptSubject.next(t)));
1341
- // Connect signaling WebSocket (no audio over WS)
1137
+ this.subscriptions.add(this.wsClient.opened$
1138
+ .pipe(takeUntil(untilCallEnds$), take(1))
1139
+ .subscribe(() => void this.onWebsocketOpened()));
1342
1140
  this.wsClient.connect(wsUrl);
1343
1141
  }
1344
1142
  catch (error) {
@@ -1350,59 +1148,185 @@ class VoiceAgentService {
1350
1148
  }
1351
1149
  });
1352
1150
  }
1353
- onRoomCreated(roomUrl) {
1151
+ onWebsocketOpened() {
1354
1152
  return __awaiter(this, void 0, void 0, function* () {
1355
- // Connect Daily.js for WebRTC audio
1356
- yield this.dailyClient.connect(roomUrl);
1357
- // Waveform: use local mic stream from Daily client
1358
- this.dailyClient.localStream$
1359
- .pipe(filter((s) => s != null), take(1))
1360
- .subscribe((stream) => {
1361
- this.audioAnalyzer.start(stream);
1362
- });
1363
- this.subscriptions.add(this.dailyClient.userSpeaking$.subscribe((s) => this.isUserSpeakingSubject.next(s)));
1364
- this.subscriptions.add(combineLatest([
1365
- this.dailyClient.speaking$,
1366
- this.dailyClient.userSpeaking$,
1367
- ]).subscribe(([bot, user]) => {
1368
- const current = this.callStateSubject.value;
1369
- if (current === 'connecting' && !bot) {
1370
- return;
1371
- }
1372
- if (current === 'connecting' && bot) {
1153
+ if (this.callStateSubject.value !== 'connecting') {
1154
+ return;
1155
+ }
1156
+ try {
1157
+ yield this.startLocalMic();
1158
+ this.statusTextSubject.next('Connected');
1159
+ this.callStateSubject.next('connected');
1160
+ this.wireSpeakingState();
1161
+ }
1162
+ catch (err) {
1163
+ console.error('[HiveGpt Voice] Mic or session setup failed', err);
1164
+ this.callStateSubject.next('ended');
1165
+ this.statusTextSubject.next('Microphone unavailable');
1166
+ yield this.disconnect();
1167
+ }
1168
+ });
1169
+ }
1170
+ wireSpeakingState() {
1171
+ const untilCallEnds$ = merge(this.destroy$, this.endCall$);
1172
+ const transcriptDrivenAssistant$ = this.wsClient.botTranscript$.pipe(switchMap(() => concat(of(true), timer(800).pipe(map(() => false)))), distinctUntilChanged());
1173
+ const assistantTalking$ = merge(this.wsClient.assistantSpeaking$, transcriptDrivenAssistant$).pipe(distinctUntilChanged(), startWith(false));
1174
+ const userTalking$ = combineLatest([
1175
+ this.audioAnalyzer.isUserSpeaking$,
1176
+ this.wsClient.serverUserSpeaking$.pipe(startWith(false)),
1177
+ ]).pipe(map(([local, server]) => local || server), distinctUntilChanged(), startWith(false));
1178
+ this.subscriptions.add(combineLatest([assistantTalking$, userTalking$])
1179
+ .pipe(takeUntil(untilCallEnds$))
1180
+ .subscribe(([bot, user]) => {
1181
+ const current = this.callStateSubject.value;
1182
+ if (user) {
1183
+ this.isUserSpeakingSubject.next(true);
1184
+ this.callStateSubject.next('listening');
1185
+ }
1186
+ else {
1187
+ this.isUserSpeakingSubject.next(false);
1188
+ }
1189
+ if (user) {
1190
+ return;
1191
+ }
1192
+ if (bot) {
1193
+ if (this.callStartTime === 0) {
1373
1194
  this.callStartTime = Date.now();
1374
1195
  this.startDurationTimer();
1375
- this.callStateSubject.next('talking');
1376
- return;
1377
- }
1378
- if (user) {
1379
- this.callStateSubject.next('listening');
1380
1196
  }
1381
- else if (bot) {
1382
- this.callStateSubject.next('talking');
1197
+ this.callStateSubject.next('talking');
1198
+ }
1199
+ else if (current === 'talking' || current === 'listening') {
1200
+ this.callStateSubject.next('connected');
1201
+ }
1202
+ }));
1203
+ }
1204
+ startLocalMic() {
1205
+ return __awaiter(this, void 0, void 0, function* () {
1206
+ this.stopLocalMic();
1207
+ const stream = yield navigator.mediaDevices.getUserMedia({ audio: true });
1208
+ const track = stream.getAudioTracks()[0];
1209
+ if (!track) {
1210
+ stream.getTracks().forEach((t) => t.stop());
1211
+ throw new Error('No audio track');
1212
+ }
1213
+ this.localMicStream = stream;
1214
+ this.isMicMutedSubject.next(!track.enabled);
1215
+ this.audioAnalyzer.start(stream);
1216
+ });
1217
+ }
1218
+ stopLocalMic() {
1219
+ if (this.localMicStream) {
1220
+ this.localMicStream.getTracks().forEach((t) => t.stop());
1221
+ this.localMicStream = null;
1222
+ }
1223
+ }
1224
+ enqueueRemoteAudio(chunk) {
1225
+ this.pendingRemoteAudio.push(chunk.slice(0));
1226
+ if (!this.remoteAudioPlaying) {
1227
+ void this.playRemoteAudioQueue();
1228
+ }
1229
+ }
1230
+ playRemoteAudioQueue() {
1231
+ return __awaiter(this, void 0, void 0, function* () {
1232
+ this.remoteAudioPlaying = true;
1233
+ const context = this.getOrCreateRemoteAudioContext();
1234
+ while (this.pendingRemoteAudio.length > 0) {
1235
+ const chunk = this.pendingRemoteAudio.shift();
1236
+ if (!chunk)
1237
+ continue;
1238
+ try {
1239
+ const decoded = yield this.decodeAudioChunk(context, chunk);
1240
+ this.assistantAudioStarted();
1241
+ yield this.playDecodedBuffer(context, decoded);
1383
1242
  }
1384
- else if (current === 'talking' || current === 'listening') {
1385
- this.callStateSubject.next('connected');
1243
+ catch (_a) {
1244
+ // Ignore undecodable chunks; server may mix non-audio binary events.
1386
1245
  }
1387
- }));
1388
- this.subscriptions.add(this.dailyClient.micMuted$.subscribe((muted) => this.isMicMutedSubject.next(muted)));
1389
- this.statusTextSubject.next('Connecting...');
1246
+ }
1247
+ this.remoteAudioPlaying = false;
1248
+ this.assistantAudioStopped();
1249
+ });
1250
+ }
1251
+ getOrCreateRemoteAudioContext() {
1252
+ if (!this.remoteAudioContext || this.remoteAudioContext.state === 'closed') {
1253
+ this.remoteAudioContext = new AudioContext();
1254
+ }
1255
+ if (this.remoteAudioContext.state === 'suspended') {
1256
+ void this.remoteAudioContext.resume();
1257
+ }
1258
+ return this.remoteAudioContext;
1259
+ }
1260
+ decodeAudioChunk(context, chunk) {
1261
+ return new Promise((resolve, reject) => {
1262
+ context.decodeAudioData(chunk.slice(0), resolve, reject);
1263
+ });
1264
+ }
1265
+ playDecodedBuffer(context, buffer) {
1266
+ return new Promise((resolve) => {
1267
+ const source = context.createBufferSource();
1268
+ source.buffer = buffer;
1269
+ source.connect(context.destination);
1270
+ source.onended = () => resolve();
1271
+ source.start();
1272
+ });
1273
+ }
1274
+ assistantAudioStarted() {
1275
+ if (this.callStartTime === 0) {
1276
+ this.callStartTime = Date.now();
1277
+ this.startDurationTimer();
1278
+ }
1279
+ this.callStateSubject.next('talking');
1280
+ }
1281
+ assistantAudioStopped() {
1282
+ if (this.callStateSubject.value === 'talking') {
1283
+ this.callStateSubject.next('connected');
1284
+ }
1285
+ }
1286
+ resetRemoteAudioPlayback() {
1287
+ this.pendingRemoteAudio = [];
1288
+ this.remoteAudioPlaying = false;
1289
+ if (this.remoteAudioContext && this.remoteAudioContext.state !== 'closed') {
1290
+ this.remoteAudioContext.close().catch(() => { });
1291
+ }
1292
+ this.remoteAudioContext = null;
1293
+ }
1294
+ handleRemoteClose() {
1295
+ return __awaiter(this, void 0, void 0, function* () {
1296
+ const state = this.callStateSubject.value;
1297
+ if (state === 'idle' || state === 'ended')
1298
+ return;
1299
+ this.endCall$.next();
1300
+ this.stopDurationTimer();
1301
+ this.callStartTime = 0;
1302
+ this.audioAnalyzer.stop();
1303
+ this.stopLocalMic();
1304
+ this.resetRemoteAudioPlayback();
1305
+ this.callStateSubject.next('ended');
1306
+ this.statusTextSubject.next('Connection lost');
1390
1307
  });
1391
1308
  }
1392
1309
  disconnect() {
1393
1310
  return __awaiter(this, void 0, void 0, function* () {
1311
+ this.endCall$.next();
1394
1312
  this.stopDurationTimer();
1313
+ this.callStartTime = 0;
1395
1314
  this.audioAnalyzer.stop();
1396
- // Daily first, then WebSocket
1397
- yield this.dailyClient.disconnect();
1315
+ this.stopLocalMic();
1316
+ this.resetRemoteAudioPlayback();
1398
1317
  this.wsClient.disconnect();
1399
1318
  this.callStateSubject.next('ended');
1400
1319
  this.statusTextSubject.next('Call Ended');
1401
1320
  });
1402
1321
  }
1403
1322
  toggleMic() {
1404
- const current = this.isMicMutedSubject.value;
1405
- this.dailyClient.setMuted(!current);
1323
+ var _a;
1324
+ const nextMuted = !this.isMicMutedSubject.value;
1325
+ const track = (_a = this.localMicStream) === null || _a === void 0 ? void 0 : _a.getAudioTracks()[0];
1326
+ if (track) {
1327
+ track.enabled = !nextMuted;
1328
+ }
1329
+ this.isMicMutedSubject.next(nextMuted);
1406
1330
  }
1407
1331
  startDurationTimer() {
1408
1332
  const updateDuration = () => {
@@ -1423,7 +1347,7 @@ class VoiceAgentService {
1423
1347
  }
1424
1348
  }
1425
1349
  }
1426
- VoiceAgentService.ɵprov = i0.ɵɵdefineInjectable({ factory: function VoiceAgentService_Factory() { return new VoiceAgentService(i0.ɵɵinject(AudioAnalyzerService), i0.ɵɵinject(WebSocketVoiceClientService), i0.ɵɵinject(DailyVoiceClientService), i0.ɵɵinject(PlatformTokenRefreshService), i0.ɵɵinject(i0.PLATFORM_ID)); }, token: VoiceAgentService, providedIn: "root" });
1350
+ VoiceAgentService.ɵprov = i0.ɵɵdefineInjectable({ factory: function VoiceAgentService_Factory() { return new VoiceAgentService(i0.ɵɵinject(AudioAnalyzerService), i0.ɵɵinject(WebSocketVoiceClientService), i0.ɵɵinject(PlatformTokenRefreshService), i0.ɵɵinject(i0.PLATFORM_ID)); }, token: VoiceAgentService, providedIn: "root" });
1427
1351
  VoiceAgentService.decorators = [
1428
1352
  { type: Injectable, args: [{
1429
1353
  providedIn: 'root',
@@ -1432,7 +1356,6 @@ VoiceAgentService.decorators = [
1432
1356
  VoiceAgentService.ctorParameters = () => [
1433
1357
  { type: AudioAnalyzerService },
1434
1358
  { type: WebSocketVoiceClientService },
1435
- { type: DailyVoiceClientService },
1436
1359
  { type: PlatformTokenRefreshService },
1437
1360
  { type: Object, decorators: [{ type: Inject, args: [PLATFORM_ID,] }] }
1438
1361
  ];
@@ -5472,7 +5395,7 @@ ChatBotComponent.propDecorators = {
5472
5395
  };
5473
5396
 
5474
5397
  /**
5475
- * Voice agent module. Uses native WebSocket + Daily.js only.
5398
+ * Voice agent module. Uses native WebSocket for the voice session.
5476
5399
  * Does NOT use Socket.IO or ngx-socket-io.
5477
5400
  */
5478
5401
  class VoiceAgentModule {
@@ -5488,8 +5411,7 @@ VoiceAgentModule.decorators = [
5488
5411
  providers: [
5489
5412
  VoiceAgentService,
5490
5413
  AudioAnalyzerService,
5491
- WebSocketVoiceClientService,
5492
- DailyVoiceClientService
5414
+ WebSocketVoiceClientService
5493
5415
  ],
5494
5416
  exports: [
5495
5417
  VoiceAgentModalComponent
@@ -5760,5 +5682,5 @@ HiveGptModule.decorators = [
5760
5682
  * Generated bundle index. Do not edit.
5761
5683
  */
5762
5684
 
5763
- export { AudioAnalyzerService, ChatBotComponent, ChatDrawerComponent, HIVEGPT_AUTH_STORAGE_KEY, HiveGptModule, PlatformTokenRefreshService, VOICE_MODAL_CLOSE_CALLBACK, VOICE_MODAL_CONFIG, VoiceAgentModalComponent, VoiceAgentModule, VoiceAgentService, eClassificationType, hiveGptAuthStorageKeyFactory, BotsService as ɵa, SocketService as ɵb, ConversationService as ɵc, NotificationSocket as ɵd, TranslationService as ɵe, WebSocketVoiceClientService as ɵf, DailyVoiceClientService as ɵg, VideoPlayerComponent as ɵh, SafeHtmlPipe as ɵi, BotHtmlEditorComponent as ɵj };
5685
+ export { AudioAnalyzerService, ChatBotComponent, ChatDrawerComponent, HIVEGPT_AUTH_STORAGE_KEY, HiveGptModule, PlatformTokenRefreshService, VOICE_MODAL_CLOSE_CALLBACK, VOICE_MODAL_CONFIG, VoiceAgentModalComponent, VoiceAgentModule, VoiceAgentService, eClassificationType, hiveGptAuthStorageKeyFactory, BotsService as ɵa, SocketService as ɵb, ConversationService as ɵc, NotificationSocket as ɵd, TranslationService as ɵe, WebSocketVoiceClientService as ɵf, VideoPlayerComponent as ɵg, SafeHtmlPipe as ɵh, BotHtmlEditorComponent as ɵi };
5764
5686
  //# sourceMappingURL=hivegpt-hiveai-angular.js.map