@aj-archipelago/cortex 1.3.5 → 1.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/helper-apps/cortex-autogen/agents.py +31 -2
  2. package/helper-apps/cortex-realtime-voice-server/.env.sample +6 -0
  3. package/helper-apps/cortex-realtime-voice-server/README.md +22 -0
  4. package/helper-apps/cortex-realtime-voice-server/bun.lockb +0 -0
  5. package/helper-apps/cortex-realtime-voice-server/client/bun.lockb +0 -0
  6. package/helper-apps/cortex-realtime-voice-server/client/index.html +12 -0
  7. package/helper-apps/cortex-realtime-voice-server/client/package.json +65 -0
  8. package/helper-apps/cortex-realtime-voice-server/client/postcss.config.js +6 -0
  9. package/helper-apps/cortex-realtime-voice-server/client/public/favicon.ico +0 -0
  10. package/helper-apps/cortex-realtime-voice-server/client/public/index.html +43 -0
  11. package/helper-apps/cortex-realtime-voice-server/client/public/logo192.png +0 -0
  12. package/helper-apps/cortex-realtime-voice-server/client/public/logo512.png +0 -0
  13. package/helper-apps/cortex-realtime-voice-server/client/public/manifest.json +25 -0
  14. package/helper-apps/cortex-realtime-voice-server/client/public/robots.txt +3 -0
  15. package/helper-apps/cortex-realtime-voice-server/client/public/sounds/connect.mp3 +0 -0
  16. package/helper-apps/cortex-realtime-voice-server/client/public/sounds/disconnect.mp3 +0 -0
  17. package/helper-apps/cortex-realtime-voice-server/client/src/App.test.tsx +9 -0
  18. package/helper-apps/cortex-realtime-voice-server/client/src/App.tsx +126 -0
  19. package/helper-apps/cortex-realtime-voice-server/client/src/SettingsModal.tsx +207 -0
  20. package/helper-apps/cortex-realtime-voice-server/client/src/chat/Chat.tsx +553 -0
  21. package/helper-apps/cortex-realtime-voice-server/client/src/chat/ChatBubble.tsx +22 -0
  22. package/helper-apps/cortex-realtime-voice-server/client/src/chat/ChatBubbleLeft.tsx +22 -0
  23. package/helper-apps/cortex-realtime-voice-server/client/src/chat/ChatBubbleRight.tsx +21 -0
  24. package/helper-apps/cortex-realtime-voice-server/client/src/chat/ChatMessage.tsx +27 -0
  25. package/helper-apps/cortex-realtime-voice-server/client/src/chat/ChatMessageInput.tsx +74 -0
  26. package/helper-apps/cortex-realtime-voice-server/client/src/chat/ChatTile.tsx +211 -0
  27. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/SoundEffects.ts +56 -0
  28. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/WavPacker.ts +112 -0
  29. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/WavRecorder.ts +571 -0
  30. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/WavStreamPlayer.ts +290 -0
  31. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/analysis/AudioAnalysis.ts +186 -0
  32. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/analysis/constants.ts +59 -0
  33. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/worklets/AudioProcessor.ts +214 -0
  34. package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/worklets/StreamProcessor.ts +183 -0
  35. package/helper-apps/cortex-realtime-voice-server/client/src/chat/components/AudioVisualizer.tsx +151 -0
  36. package/helper-apps/cortex-realtime-voice-server/client/src/chat/components/CopyButton.tsx +32 -0
  37. package/helper-apps/cortex-realtime-voice-server/client/src/chat/components/ImageOverlay.tsx +166 -0
  38. package/helper-apps/cortex-realtime-voice-server/client/src/chat/components/MicrophoneVisualizer.tsx +95 -0
  39. package/helper-apps/cortex-realtime-voice-server/client/src/chat/components/ScreenshotCapture.tsx +116 -0
  40. package/helper-apps/cortex-realtime-voice-server/client/src/chat/hooks/useWindowResize.ts +27 -0
  41. package/helper-apps/cortex-realtime-voice-server/client/src/chat/utils/audio.ts +33 -0
  42. package/helper-apps/cortex-realtime-voice-server/client/src/index.css +20 -0
  43. package/helper-apps/cortex-realtime-voice-server/client/src/index.tsx +19 -0
  44. package/helper-apps/cortex-realtime-voice-server/client/src/logo.svg +1 -0
  45. package/helper-apps/cortex-realtime-voice-server/client/src/react-app-env.d.ts +1 -0
  46. package/helper-apps/cortex-realtime-voice-server/client/src/reportWebVitals.ts +15 -0
  47. package/helper-apps/cortex-realtime-voice-server/client/src/setupTests.ts +5 -0
  48. package/helper-apps/cortex-realtime-voice-server/client/src/utils/logger.ts +45 -0
  49. package/helper-apps/cortex-realtime-voice-server/client/tailwind.config.js +14 -0
  50. package/helper-apps/cortex-realtime-voice-server/client/tsconfig.json +30 -0
  51. package/helper-apps/cortex-realtime-voice-server/client/vite.config.ts +22 -0
  52. package/helper-apps/cortex-realtime-voice-server/index.ts +19 -0
  53. package/helper-apps/cortex-realtime-voice-server/package.json +28 -0
  54. package/helper-apps/cortex-realtime-voice-server/src/ApiServer.ts +35 -0
  55. package/helper-apps/cortex-realtime-voice-server/src/SocketServer.ts +737 -0
  56. package/helper-apps/cortex-realtime-voice-server/src/Tools.ts +520 -0
  57. package/helper-apps/cortex-realtime-voice-server/src/cortex/expert.ts +29 -0
  58. package/helper-apps/cortex-realtime-voice-server/src/cortex/image.ts +29 -0
  59. package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts +91 -0
  60. package/helper-apps/cortex-realtime-voice-server/src/cortex/reason.ts +29 -0
  61. package/helper-apps/cortex-realtime-voice-server/src/cortex/search.ts +30 -0
  62. package/helper-apps/cortex-realtime-voice-server/src/cortex/style.ts +31 -0
  63. package/helper-apps/cortex-realtime-voice-server/src/cortex/utils.ts +95 -0
  64. package/helper-apps/cortex-realtime-voice-server/src/cortex/vision.ts +34 -0
  65. package/helper-apps/cortex-realtime-voice-server/src/realtime/client.ts +499 -0
  66. package/helper-apps/cortex-realtime-voice-server/src/realtime/realtimeTypes.ts +279 -0
  67. package/helper-apps/cortex-realtime-voice-server/src/realtime/socket.ts +27 -0
  68. package/helper-apps/cortex-realtime-voice-server/src/realtime/transcription.ts +75 -0
  69. package/helper-apps/cortex-realtime-voice-server/src/realtime/utils.ts +33 -0
  70. package/helper-apps/cortex-realtime-voice-server/src/utils/logger.ts +45 -0
  71. package/helper-apps/cortex-realtime-voice-server/src/utils/prompt.ts +81 -0
  72. package/helper-apps/cortex-realtime-voice-server/tsconfig.json +28 -0
  73. package/package.json +1 -1
  74. package/pathways/basePathway.js +3 -1
  75. package/pathways/system/entity/memory/sys_memory_manager.js +3 -0
  76. package/pathways/system/entity/memory/sys_memory_update.js +44 -45
  77. package/pathways/system/entity/memory/sys_read_memory.js +86 -6
  78. package/pathways/system/entity/memory/sys_search_memory.js +66 -0
  79. package/pathways/system/entity/shared/sys_entity_constants.js +2 -2
  80. package/pathways/system/entity/sys_entity_continue.js +2 -1
  81. package/pathways/system/entity/sys_entity_start.js +10 -0
  82. package/pathways/system/entity/sys_generator_expert.js +0 -2
  83. package/pathways/system/entity/sys_generator_memory.js +31 -0
  84. package/pathways/system/entity/sys_generator_voice_sample.js +36 -0
  85. package/pathways/system/entity/sys_router_tool.js +13 -10
  86. package/pathways/system/sys_parse_numbered_object_list.js +1 -1
  87. package/server/pathwayResolver.js +41 -31
  88. package/server/plugins/azureVideoTranslatePlugin.js +28 -16
  89. package/server/plugins/claude3VertexPlugin.js +0 -9
  90. package/server/plugins/gemini15ChatPlugin.js +18 -5
  91. package/server/plugins/modelPlugin.js +27 -6
  92. package/server/plugins/openAiChatPlugin.js +10 -8
  93. package/server/plugins/openAiVisionPlugin.js +56 -0
  94. package/tests/memoryfunction.test.js +73 -1
package/helper-apps/cortex-realtime-voice-server/client/src/chat/audio/WavRecorder.ts
@@ -0,0 +1,571 @@
1
+ import { AudioProcessorSrc } from './worklets/AudioProcessor';
2
+ import { AudioAnalysis, AudioAnalysisOutputType } from './analysis/AudioAnalysis';
3
+ import { WavPacker, WavPackerAudioType } from './WavPacker';
4
+
5
+ /**
6
+ * Decodes audio into a wav file
7
+ */
8
+ interface DecodedAudioType {
9
+ blob: Blob;
10
+ url: string;
11
+ values: Float32Array;
12
+ audioBuffer: AudioBuffer;
13
+ }
14
+
15
+ /**
16
+ * Records live stream of user audio as PCM16 "audio/wav" data
17
+ * @class
18
+ */
19
+ export class WavRecorder {
20
+ private readonly scriptSrc: string;
21
+ private readonly sampleRate: number;
22
+ private readonly outputToSpeakers: boolean;
23
+ private readonly debug: boolean;
24
+ private _deviceChangeCallback: null | (() => Promise<void>);
25
+ private stream: null | MediaStream;
26
+ private processor: null | AudioWorkletNode;
27
+ private source: null | MediaStreamAudioSourceNode;
28
+ private node: null | AudioNode;
29
+ private recording: boolean;
30
+ private _lastEventId: number;
31
+ private readonly eventReceipts: Record<number, any>;
32
+ private readonly eventTimeout: number;
33
+ private _chunkProcessor: (data: { mono: ArrayBuffer; raw: ArrayBuffer; }) => any;
34
+ private _chunkProcessorSize: number;
35
+ private _chunkProcessorBuffer: { mono: ArrayBuffer; raw: ArrayBuffer };
36
+ private analyser: null | AnalyserNode;
37
+ /**
38
+ * Create a new WavRecorder instance
39
+ * @param {{sampleRate?: number, outputToSpeakers?: boolean, debug?: boolean}} [options]
40
+ */
41
+ constructor({
42
+ sampleRate = 44100,
43
+ outputToSpeakers = false,
44
+ debug = false,
45
+ }: { sampleRate?: number; outputToSpeakers?: boolean; debug?: boolean; } = {}) {
46
+ // Script source
47
+ this.scriptSrc = AudioProcessorSrc;
48
+ // Config
49
+ this.sampleRate = sampleRate;
50
+ this.outputToSpeakers = outputToSpeakers;
51
+ this.debug = debug;
52
+ this._deviceChangeCallback = null;
53
+ // State variables
54
+ this.stream = null;
55
+ this.processor = null;
56
+ this.source = null;
57
+ this.node = null;
58
+ this.analyser = null;
59
+ this.recording = false;
60
+ // Event handling with AudioWorklet
61
+ this._lastEventId = 0;
62
+ this.eventReceipts = {};
63
+ this.eventTimeout = 5000;
64
+ // Process chunks of audio
65
+ this._chunkProcessor = () => {};
66
+ this._chunkProcessorSize = 0;
67
+ this._chunkProcessorBuffer = {
68
+ raw: new ArrayBuffer(0),
69
+ mono: new ArrayBuffer(0),
70
+ };
71
+ }
72
+
73
+ /**
74
+ * Decodes audio data from multiple formats to a Blob, url, Float32Array and AudioBuffer
75
+ * @param {Blob|Float32Array|Int16Array|ArrayBuffer|number[]} audioData
76
+ * @param {number} sampleRate
77
+ * @param {number} fromSampleRate
78
+ * @returns {Promise<DecodedAudioType>}
79
+ */
80
+ static async decode(audioData: Blob|Float32Array|Int16Array|ArrayBuffer|number[],
81
+ sampleRate = 44100,
82
+ fromSampleRate = -1): Promise<DecodedAudioType> {
83
+ const context = new AudioContext({ sampleRate });
84
+ let arrayBuffer;
85
+ let blob;
86
+ if (audioData instanceof Blob) {
87
+ if (fromSampleRate !== -1) {
88
+ throw new Error(
89
+ `Can not specify "fromSampleRate" when reading from Blob`,
90
+ );
91
+ }
92
+ blob = audioData;
93
+ arrayBuffer = await blob.arrayBuffer();
94
+ } else if (audioData instanceof ArrayBuffer) {
95
+ if (fromSampleRate !== -1) {
96
+ throw new Error(
97
+ `Can not specify "fromSampleRate" when reading from ArrayBuffer`,
98
+ );
99
+ }
100
+ arrayBuffer = audioData;
101
+ blob = new Blob([arrayBuffer], { type: 'audio/wav' });
102
+ } else {
103
+ let float32Array;
104
+ let data;
105
+ if (audioData instanceof Int16Array) {
106
+ data = audioData;
107
+ float32Array = new Float32Array(audioData.length);
108
+ for (let i = 0; i < audioData.length; i++) {
109
+ // @ts-ignore
110
+ float32Array[i] = audioData[i] / 0x8000;
111
+ }
112
+ } else if (audioData instanceof Float32Array) {
113
+ float32Array = audioData;
114
+ } else {
115
+ float32Array = new Float32Array(audioData);
116
+ }
117
+ if (fromSampleRate === -1) {
118
+ throw new Error(
119
+ `Must specify "fromSampleRate" when reading from Float32Array, In16Array or Array`,
120
+ );
121
+ } else if (fromSampleRate < 3000) {
122
+ throw new Error(`Minimum "fromSampleRate" is 3000 (3kHz)`);
123
+ }
124
+ if (!data) {
125
+ data = WavPacker.floatTo16BitPCM(float32Array);
126
+ }
127
+ const audio = {
128
+ bitsPerSample: 16,
129
+ channels: [float32Array],
130
+ data: new Int16Array(data),
131
+ };
132
+ const packer = new WavPacker();
133
+ const result = packer.pack(fromSampleRate, audio);
134
+ blob = result.blob;
135
+ arrayBuffer = await blob.arrayBuffer();
136
+ }
137
+ const audioBuffer = await context.decodeAudioData(arrayBuffer);
138
+ const values = audioBuffer.getChannelData(0);
139
+ const url = URL.createObjectURL(blob);
140
+ return {
141
+ blob,
142
+ url,
143
+ values,
144
+ audioBuffer,
145
+ };
146
+ }
147
+
148
+ /**
149
+ * Logs data in debug mode
150
+ * @returns {true}
151
+ * @param args
152
+ */
153
+ log(...args: any): true {
154
+ if (this.debug) {
155
+ console.log(args);
156
+ }
157
+ return true;
158
+ }
159
+
160
+ /**
161
+ * Retrieves the current sampleRate for the recorder
162
+ * @returns {number}
163
+ */
164
+ getSampleRate(): number {
165
+ return this.sampleRate;
166
+ }
167
+
168
+ /**
169
+ * Retrieves the current status of the recording
170
+ * @returns {"ended"|"paused"|"recording"}
171
+ */
172
+ getStatus(): "ended" | "paused" | "recording" {
173
+ if (!this.processor) {
174
+ return 'ended';
175
+ } else if (!this.recording) {
176
+ return 'paused';
177
+ } else {
178
+ return 'recording';
179
+ }
180
+ }
181
+
182
+ /**
183
+ * Gets the current MediaStream if one exists
184
+ * @returns {MediaStream | null} The current MediaStream or null if not recording
185
+ */
186
+ getStream(): MediaStream | null {
187
+ return this.stream;
188
+ }
189
+
190
+ /**
191
+ * Sends an event to the AudioWorklet
192
+ * @private
193
+ * @param {string} name
194
+ * @param {{[key: string]: any}} data
195
+ * @param {AudioWorkletNode} [_processor]
196
+ * @returns {Promise<{[key: string]: any}>}
197
+ */
198
+ async _event(name: string,
199
+ data: { [key: string]: any; } = {},
200
+ _processor: AudioWorkletNode | null = null): Promise<{ [key: string]: any; }> {
201
+ _processor = _processor || this.processor;
202
+ if (!_processor) {
203
+ throw new Error('Can not send events without recording first');
204
+ }
205
+ const message = {
206
+ event: name,
207
+ id: this._lastEventId++,
208
+ data,
209
+ };
210
+ _processor.port.postMessage(message);
211
+ const t0 = new Date().valueOf();
212
+ while (!this.eventReceipts[message.id]) {
213
+ if (new Date().valueOf() - t0 > this.eventTimeout) {
214
+ throw new Error(`Timeout waiting for "${name}" event`);
215
+ }
216
+ await new Promise((res) => setTimeout(() => res(true), 1));
217
+ }
218
+ const payload = this.eventReceipts[message.id];
219
+ delete this.eventReceipts[message.id];
220
+ return payload;
221
+ }
222
+
223
+ /**
224
+ * Sets device change callback, remove if callback provided is `null`
225
+ * @param {(Array<MediaDeviceInfo & {default: boolean}>) => void|null} callback
226
+ * @returns {true}
227
+ */
228
+ listenForDeviceChange(callback: null | ((p: Array<MediaDeviceInfo & {default: boolean}>) => void)): true {
229
+ if (callback === null && this._deviceChangeCallback) {
230
+ navigator.mediaDevices.removeEventListener(
231
+ 'devicechange',
232
+ this._deviceChangeCallback,
233
+ );
234
+ this._deviceChangeCallback = null;
235
+ } else if (callback !== null) {
236
+ // Basically a debounce; we only want this called once when devices change
237
+ // And we only want the most recent callback() to be executed
238
+ // if a few are operating at the same time
239
+ let lastId = 0;
240
+ let lastDevices: Array<MediaDeviceInfo & { default: boolean; }> = [];
241
+ const serializeDevices = (devices: Array<MediaDeviceInfo & { default: boolean; }>) =>
242
+ devices
243
+ .map((d) => d.deviceId)
244
+ .sort()
245
+ .join(',');
246
+ const cb = async () => {
247
+ let id = ++lastId;
248
+ const devices = await this.listDevices();
249
+ if (id === lastId) {
250
+ if (serializeDevices(lastDevices) !== serializeDevices(devices)) {
251
+ lastDevices = devices;
252
+ callback(devices.slice());
253
+ }
254
+ }
255
+ };
256
+ navigator.mediaDevices.addEventListener('devicechange', cb);
257
+ cb();
258
+ this._deviceChangeCallback = cb;
259
+ }
260
+ return true;
261
+ }
262
+
263
+ /**
264
+ * Manually request permission to use the microphone
265
+ * @returns {Promise<true>}
266
+ */
267
+ async requestPermission(): Promise<true> {
268
+ const permissionStatus = await navigator.permissions.query({
269
+ // @ts-ignore
270
+ name: 'microphone',
271
+ });
272
+ if (permissionStatus.state === 'denied') {
273
+ window.alert('You must grant microphone access to use this feature.');
274
+ } else if (permissionStatus.state === 'prompt') {
275
+ try {
276
+ const stream = await navigator.mediaDevices.getUserMedia({
277
+ audio: true,
278
+ });
279
+ const tracks = stream.getTracks();
280
+ tracks.forEach((track) => track.stop());
281
+ } catch (e) {
282
+ window.alert('You must grant microphone access to use this feature.');
283
+ }
284
+ }
285
+ return true;
286
+ }
287
+
288
+ /**
289
+ * List all eligible devices for recording, will request permission to use microphone
290
+ * @returns {Promise<Array<MediaDeviceInfo & {default: boolean}>>}
291
+ */
292
+ async listDevices(): Promise<Array<MediaDeviceInfo & { default: boolean; }>> {
293
+ if (
294
+ !navigator.mediaDevices ||
295
+ !('enumerateDevices' in navigator.mediaDevices)
296
+ ) {
297
+ throw new Error('Could not request user devices');
298
+ }
299
+ await this.requestPermission();
300
+ const devices = await navigator.mediaDevices.enumerateDevices();
301
+ const audioDevices = devices.filter(
302
+ (device) => device.kind === 'audioinput',
303
+ );
304
+ const defaultDeviceIndex = audioDevices.findIndex(
305
+ (device) => device.deviceId === 'default',
306
+ );
307
+ const deviceList = [];
308
+ if (defaultDeviceIndex !== -1) {
309
+ let defaultDevice = audioDevices.splice(defaultDeviceIndex, 1)[0];
310
+ let existingIndex = audioDevices.findIndex(
311
+ (device) => device.groupId === defaultDevice?.groupId,
312
+ );
313
+ if (existingIndex !== -1) {
314
+ defaultDevice = audioDevices.splice(existingIndex, 1)[0];
315
+ }
316
+ (defaultDevice as MediaDeviceInfo & { default: boolean; }).default = true;
317
+ deviceList.push(defaultDevice);
318
+ }
319
+ return deviceList.concat(audioDevices) as Array<MediaDeviceInfo & { default: boolean; }>;
320
+ }
321
+
322
+ /**
323
+ * Begins a recording session and requests microphone permissions if not already granted
324
+ * Microphone recording indicator will appear on browser tab but status will be "paused"
325
+ * @param {string} [deviceId] if no device provided, default device will be used
326
+ * @returns {Promise<true>}
327
+ */
328
+ async begin(deviceId: string | null): Promise<true> {
329
+ if (this.processor) {
330
+ throw new Error(
331
+ `Already connected: please call .end() to start a new session`,
332
+ );
333
+ }
334
+
335
+ if (
336
+ !navigator.mediaDevices ||
337
+ !('getUserMedia' in navigator.mediaDevices)
338
+ ) {
339
+ throw new Error('Could not request user media');
340
+ }
341
+ try {
342
+ const config: {audio: boolean | {deviceId : {exact: string}}} = { audio: true };
343
+ if (deviceId) {
344
+ config.audio = { deviceId: { exact: deviceId } };
345
+ }
346
+ this.stream = await navigator.mediaDevices.getUserMedia(config);
347
+ } catch (err) {
348
+ throw new Error('Could not start media stream');
349
+ }
350
+
351
+ const context = new AudioContext({ sampleRate: this.sampleRate });
352
+ const source = context.createMediaStreamSource(this.stream);
353
+ // Load and execute the module script.
354
+ try {
355
+ await context.audioWorklet.addModule(this.scriptSrc);
356
+ } catch (e) {
357
+ console.error(e);
358
+ throw new Error(`Could not add audioWorklet module: ${this.scriptSrc}`);
359
+ }
360
+ const processor = new AudioWorkletNode(context, 'audio_processor');
361
+ processor.port.onmessage = (e) => {
362
+ const { event, id, data } = e.data;
363
+ if (event === 'receipt') {
364
+ this.eventReceipts[id] = data;
365
+ } else if (event === 'chunk') {
366
+ if (this._chunkProcessorSize) {
367
+ const buffer = this._chunkProcessorBuffer;
368
+ this._chunkProcessorBuffer = {
369
+ raw: WavPacker.mergeBuffers(buffer.raw, data.raw),
370
+ mono: WavPacker.mergeBuffers(buffer.mono, data.mono),
371
+ };
372
+ if (
373
+ this._chunkProcessorBuffer.mono.byteLength >=
374
+ this._chunkProcessorSize
375
+ ) {
376
+ this._chunkProcessor(this._chunkProcessorBuffer);
377
+ this._chunkProcessorBuffer = {
378
+ raw: new ArrayBuffer(0),
379
+ mono: new ArrayBuffer(0),
380
+ };
381
+ }
382
+ } else {
383
+ this._chunkProcessor(data);
384
+ }
385
+ }
386
+ };
387
+
388
+ const node = source.connect(processor);
389
+ const analyser = context.createAnalyser();
390
+ analyser.fftSize = 8192;
391
+ analyser.smoothingTimeConstant = 0.1;
392
+ node.connect(analyser);
393
+ if (this.outputToSpeakers) {
394
+ // eslint-disable-next-line no-console
395
+ console.warn(
396
+ 'Warning: Output to speakers may affect sound quality,\n' +
397
+ 'especially due to system audio feedback preventative measures.\n' +
398
+ 'use only for debugging',
399
+ );
400
+ analyser.connect(context.destination);
401
+ }
402
+
403
+ this.source = source;
404
+ this.node = node;
405
+ this.analyser = analyser;
406
+ this.processor = processor;
407
+ return true;
408
+ }
409
+
410
+ /**
411
+ * Gets the current frequency domain data from the recording track
412
+ * @param {"frequency"|"music"|"voice"} [analysisType]
413
+ * @param {number} [minDecibels] default -100
414
+ * @param {number} [maxDecibels] default -30
415
+ * @returns {import('./analysis/audio_analysis.js').AudioAnalysisOutputType}
416
+ */
417
+ getFrequencies(
418
+ analysisType: "frequency" | "music" | "voice" = 'frequency',
419
+ minDecibels: number = -100,
420
+ maxDecibels: number = -30,
421
+ ): AudioAnalysisOutputType {
422
+ if (!this.processor || !this.analyser) {
423
+ throw new Error('Session ended: please call .begin() first');
424
+ }
425
+ return AudioAnalysis.getFrequencies(
426
+ this.analyser,
427
+ this.sampleRate,
428
+ null,
429
+ analysisType,
430
+ minDecibels,
431
+ maxDecibels,
432
+ );
433
+ }
434
+
435
+ /**
436
+ * Pauses the recording
437
+ * Keeps microphone stream open but halts storage of audio
438
+ * @returns {Promise<true>}
439
+ */
440
+ async pause(): Promise<true> {
441
+ if (!this.processor) {
442
+ throw new Error('Session ended: please call .begin() first');
443
+ } else if (!this.recording) {
444
+ throw new Error('Already paused: please call .record() first');
445
+ }
446
+ if (this._chunkProcessorBuffer.raw.byteLength) {
447
+ this._chunkProcessor(this._chunkProcessorBuffer);
448
+ }
449
+ this.log('Pausing ...');
450
+ await this._event('stop');
451
+ this.recording = false;
452
+ return true;
453
+ }
454
+
455
+ /**
456
+ * Start recording stream and storing to memory from the connected audio source
457
+ * @param {(data: { mono: Int16Array; raw: Int16Array }) => any} [chunkProcessor]
458
+ * @param {number} [chunkSize] chunkProcessor will not be triggered until this size threshold met in mono audio
459
+ * @returns {Promise<true>}
460
+ */
461
+ async record(chunkProcessor: (data: { mono: ArrayBuffer; raw: ArrayBuffer; }) => any = () => {
462
+ }, chunkSize: number = 8192): Promise<true> {
463
+ if (!this.processor) {
464
+ throw new Error('Session ended: please call .begin() first');
465
+ } else if (this.recording) {
466
+ throw new Error('Already recording: please call .pause() first');
467
+ } else if (typeof chunkProcessor !== 'function') {
468
+ throw new Error(`chunkProcessor must be a function`);
469
+ }
470
+ this._chunkProcessor = chunkProcessor;
471
+ this._chunkProcessorSize = chunkSize;
472
+ this._chunkProcessorBuffer = {
473
+ raw: new ArrayBuffer(0),
474
+ mono: new ArrayBuffer(0),
475
+ };
476
+ this.log('Recording ...');
477
+ await this._event('start');
478
+ this.recording = true;
479
+ return true;
480
+ }
481
+
482
+ /**
483
+ * Clears the audio buffer, empties stored recording
484
+ * @returns {Promise<true>}
485
+ */
486
+ async clear() {
487
+ if (!this.processor) {
488
+ throw new Error('Session ended: please call .begin() first');
489
+ }
490
+ await this._event('clear');
491
+ return true;
492
+ }
493
+
494
+ /**
495
+ * Reads the current audio stream data
496
+ * @returns {Promise<{meanValues: Float32Array, channels: Array<Float32Array>}>}
497
+ */
498
+ async read(): Promise<{ meanValues: Float32Array; channels: Array<Float32Array>; }> {
499
+ if (!this.processor) {
500
+ throw new Error('Session ended: please call .begin() first');
501
+ }
502
+ this.log('Reading ...');
503
+ return await this._event('read') as { meanValues: Float32Array; channels: Array<Float32Array>; };
504
+ }
505
+
506
+ /**
507
+ * Saves the current audio stream to a file
508
+ * @param {boolean} [force] Force saving while still recording
509
+ * @returns {Promise<WavPackerAudioType>}
510
+ */
511
+ async save(force: boolean = false): Promise<WavPackerAudioType> {
512
+ if (!this.processor) {
513
+ throw new Error('Session ended: please call .begin() first');
514
+ }
515
+ if (!force && this.recording) {
516
+ throw new Error(
517
+ 'Currently recording: please call .pause() first, or call .save(true) to force',
518
+ );
519
+ }
520
+ this.log('Exporting ...');
521
+ const exportData = await this._event('export');
522
+ const packer = new WavPacker();
523
+ return packer.pack(this.sampleRate, exportData.audio);
524
+ }
525
+
526
+ /**
527
+ * Ends the current recording session and saves the result
528
+ * @returns {Promise<WavPackerAudioType>}
529
+ */
530
+ async end(): Promise<WavPackerAudioType> {
531
+ if (!this.processor) {
532
+ throw new Error('Session ended: please call .begin() first');
533
+ }
534
+
535
+ const _processor = this.processor;
536
+
537
+ this.log('Stopping ...');
538
+ await this._event('stop');
539
+ this.recording = false;
540
+ const tracks = this.stream?.getTracks();
541
+ tracks?.forEach((track) => track.stop());
542
+
543
+ this.log('Exporting ...');
544
+ const exportData = await this._event('export', {}, _processor);
545
+
546
+ this.processor.disconnect();
547
+ this.source?.disconnect();
548
+ this.node?.disconnect();
549
+ this.analyser?.disconnect();
550
+ this.stream = null;
551
+ this.processor = null;
552
+ this.source = null;
553
+ this.node = null;
554
+
555
+ const packer = new WavPacker();
556
+ return packer.pack(this.sampleRate, exportData.audio);
557
+ }
558
+
559
+ /**
560
+ * Performs a full cleanup of WavRecorder instance
561
+ * Stops actively listening via microphone and removes existing listeners
562
+ * @returns {Promise<true>}
563
+ */
564
+ async quit(): Promise<true> {
565
+ this.listenForDeviceChange(null);
566
+ if (this.processor) {
567
+ await this.end();
568
+ }
569
+ return true;
570
+ }
571
+ }