@layercode/js-sdk 2.1.2 → 2.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -795,6 +795,8 @@ class AudioProcessor extends AudioWorkletProcessor {
   constructor() {
     super();
     this.port.onmessage = this.receive.bind(this);
+    this.downsampleRatio = 1;
+    this.downsampleOffset = 0;
     this.initialize();
   }

@@ -802,6 +804,7 @@ class AudioProcessor extends AudioWorkletProcessor {
     this.foundAudio = false;
     this.recording = false;
     this.chunks = [];
+    this.downsampleOffset = 0;
   }

   /**
@@ -908,9 +911,12 @@ class AudioProcessor extends AudioWorkletProcessor {
   }

   receive(e) {
-    const { event, id } = e.data;
+    const { event, id, data } = e.data;
     let receiptData = {};
     switch (event) {
+      case 'configure':
+        this.configure(data);
+        return;
       case 'start':
         this.recording = true;
         break;
@@ -933,6 +939,24 @@ class AudioProcessor extends AudioWorkletProcessor {
     this.port.postMessage({ event: 'receipt', id, data: receiptData });
   }

+  configure(config = {}) {
+    const inputSampleRate = config?.inputSampleRate;
+    const targetSampleRate = config?.targetSampleRate;
+    if (
+      typeof inputSampleRate === 'number' &&
+      inputSampleRate > 0 &&
+      typeof targetSampleRate === 'number' &&
+      targetSampleRate > 0
+    ) {
+      if (inputSampleRate <= targetSampleRate) {
+        this.downsampleRatio = 1;
+      } else {
+        this.downsampleRatio = inputSampleRate / targetSampleRate;
+      }
+      this.downsampleOffset = 0;
+    }
+  }
+
   sendChunk(chunk) {
     const channels = this.readChannelData([chunk]);
     const { float32Array, meanValues } = this.formatAudioData(channels);
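
The new configure() handler only derives a decimation ratio from the two rates and resets the read offset. As a sketch, this is the message shape that drives it (mirroring the postMessage added further down in this diff; the 48 kHz / 16 kHz figures and the processorNode name are illustrative, not from the package):

    // With a 48 kHz capture context and a 16 kHz target, configure() sets
    // downsampleRatio = 48000 / 16000 = 3 and downsampleOffset = 0.
    processorNode.port.postMessage({
      event: 'configure',
      data: { inputSampleRate: 48000, targetSampleRate: 16000 },
    });
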
@@ -985,14 +1009,40 @@ class AudioProcessor extends AudioWorkletProcessor {
       }
     }
     if (inputs && inputs[0] && this.foundAudio && this.recording) {
-      // We need to copy the TypedArray, because the
+      // We need to copy the TypedArray, because the `process`
       // internals will reuse the same buffer to hold each input
       const chunk = inputs.map((input) => input.slice(sliceIndex));
-      this.
-
+      const processedChunk = this.downsampleChunk(chunk);
+      if (processedChunk[0] && processedChunk[0].length) {
+        this.chunks.push(processedChunk);
+        this.sendChunk(processedChunk);
+      }
     }
     return true;
   }
+
+  downsampleChunk(chunk) {
+    if (this.downsampleRatio === 1) {
+      return chunk;
+    }
+    const channelCount = chunk.length;
+    if (!channelCount || !chunk[0]?.length) {
+      return chunk;
+    }
+    const ratio = this.downsampleRatio;
+    const inputLength = chunk[0].length;
+    const outputs = Array.from({ length: channelCount }, () => []);
+    let offset = this.downsampleOffset;
+    while (offset < inputLength) {
+      const sampleIndex = Math.floor(offset);
+      for (let c = 0; c < channelCount; c++) {
+        outputs[c].push(chunk[c][sampleIndex]);
+      }
+      offset += ratio;
+    }
+    this.downsampleOffset = offset - inputLength;
+    return outputs.map((samples) => Float32Array.from(samples));
+  }
 }

 registerProcessor('audio_processor', AudioProcessor);
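
For intuition, downsampleChunk() is nearest-neighbor decimation that carries the fractional read offset across process() blocks, so non-integer ratios (e.g. 44.1 kHz to 16 kHz, ratio ≈ 2.756) do not drift over time. A self-contained sketch of the same loop on one mono 128-sample block (illustrative values, not package code):

    // Ratio 3 (48 kHz -> 16 kHz): one 128-sample block yields 43 output
    // samples, and the leftover offset of 1 carries into the next block.
    const ratio = 48000 / 16000;
    const input = Float32Array.from({ length: 128 }, (_, i) => i);
    const out = [];
    let offset = 0; // persists on the processor between blocks
    while (offset < input.length) {
      out.push(input[Math.floor(offset)]);
      offset += ratio;
    }
    offset -= input.length;
    console.log(out.length, offset); // 43 1
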
@@ -1038,10 +1088,13 @@ class WavRecorder {
     this._devices = [];
     // State variables
     this.stream = null;
+    this.audioContext = null;
     this.processor = null;
     this.source = null;
     this.node = null;
+    this.analyser = null;
     this.recording = false;
+    this.contextSampleRate = sampleRate;
     // Event handling with AudioWorklet
     this._lastEventId = 0;
     this.eventReceipts = {};
@@ -1250,20 +1303,53 @@ class WavRecorder {
    * @returns {Promise<true>}
    */
   async requestPermission() {
-    const
-
-
-
-
-
+    const ensureUserMediaAccess = async () => {
+      const stream = await navigator.mediaDevices.getUserMedia({
+        audio: true,
+      });
+      const tracks = stream.getTracks();
+      tracks.forEach((track) => track.stop());
+    };
+
+    const permissionsUnsupported =
+      !navigator.permissions ||
+      typeof navigator.permissions.query !== 'function';
+
+    if (permissionsUnsupported) {
       try {
-
-
-
-
-
-
+        await ensureUserMediaAccess();
+      } catch (error) {
+        window.alert('You must grant microphone access to use this feature.');
+        throw error;
+      }
+      return true;
+    }
+
+    try {
+      const permissionStatus = await navigator.permissions.query({
+        name: 'microphone',
+      });
+
+      if (permissionStatus.state === 'denied') {
+        window.alert('You must grant microphone access to use this feature.');
+        return true;
+      }
+
+      if (permissionStatus.state === 'prompt') {
+        try {
+          await ensureUserMediaAccess();
+        } catch (error) {
+          window.alert('You must grant microphone access to use this feature.');
+          throw error;
+        }
+      }
+    } catch (error) {
+      // Firefox rejects permissions.query with NotSupportedError – fall back to getUserMedia directly
+      try {
+        await ensureUserMediaAccess();
+      } catch (fallbackError) {
         window.alert('You must grant microphone access to use this feature.');
+        throw fallbackError;
       }
     }
     return true;
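
A hypothetical call site for the reworked permission flow (the constructor options shown are assumptions, not taken from this diff): the method first consults the Permissions API, and only opens a throwaway getUserMedia stream, immediately stopped, when the state is 'prompt' or the query is unsupported.

    const recorder = new WavRecorder({ sampleRate: 16000 }); // options assumed
    await recorder.requestPermission(); // alerts and rethrows if access is denied mid-prompt
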
@@ -1299,6 +1385,10 @@ class WavRecorder {
       }
       defaultDevice.default = true;
       deviceList.push(defaultDevice);
+    } else if (audioDevices.length) {
+      const fallbackDefault = audioDevices.shift();
+      fallbackDefault.default = true;
+      deviceList.push(fallbackDefault);
     }
     return deviceList.concat(audioDevices);
   }
@@ -1340,8 +1430,36 @@ class WavRecorder {
       throw new Error('Could not start media stream');
     }

-    const
-
+    const createContext = (rate) => {
+      try {
+        return rate ? new AudioContext({ sampleRate: rate }) : new AudioContext();
+      } catch (error) {
+        console.warn('Failed to create AudioContext with sampleRate', rate, error);
+        return null;
+      }
+    };
+
+    let context = createContext(this.sampleRate);
+    if (!context) {
+      context = createContext();
+    }
+    if (!context) {
+      throw new Error('Could not create AudioContext');
+    }
+
+    let source;
+    try {
+      source = context.createMediaStreamSource(this.stream);
+    } catch (error) {
+      await context.close().catch(() => {});
+      context = createContext();
+      if (!context) {
+        throw error;
+      }
+      source = context.createMediaStreamSource(this.stream);
+    }
+
+    this.contextSampleRate = context.sampleRate;
     // Load and execute the module script.
     try {
       await context.audioWorklet.addModule(this.scriptSrc);
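
The net effect is a rate-preference ladder: try an AudioContext at the requested sampleRate, fall back to the device default if construction fails, and give createMediaStreamSource one retry on a fresh default-rate context (some browsers, notably Firefox, throw here when the context rate differs from the track's hardware rate). Whichever rate wins is stored in contextSampleRate so the worklet can decimate down to the target. Condensed (createContext as defined above; a sketch, not the full code):

    let context = createContext(this.sampleRate) ?? createContext();
    if (!context) throw new Error('Could not create AudioContext');
    // createMediaStreamSource may still throw on a rate mismatch ->
    // rebuild at the default rate and retry once.
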
@@ -1377,6 +1495,14 @@ class WavRecorder {
       }
     };

+    processor.port.postMessage({
+      event: 'configure',
+      data: {
+        inputSampleRate: this.contextSampleRate,
+        targetSampleRate: this.sampleRate,
+      },
+    });
+
     const node = source.connect(processor);
     const analyser = context.createAnalyser();
     analyser.fftSize = 8192;
@@ -1392,6 +1518,15 @@ class WavRecorder {
       analyser.connect(context.destination);
     }

+    if (context.state === 'suspended') {
+      try {
+        await context.resume();
+      } catch (resumeError) {
+        console.warn('AudioContext resume failed', resumeError);
+      }
+    }
+
+    this.audioContext = context;
     this.source = source;
     this.node = node;
     this.analyser = analyser;
@@ -1589,6 +1724,17 @@ class WavRecorder {
     this.processor = null;
     this.source = null;
     this.node = null;
+    this.analyser = null;
+
+    if (this.audioContext) {
+      try {
+        await this.audioContext.close();
+      } catch (contextError) {
+        console.warn('Failed to close AudioContext', contextError);
+      }
+      this.audioContext = null;
+    }
+    this.contextSampleRate = null;

     const packer = new WavPacker();
     const result = packer.pack(this.sampleRate, exportData.audio);
@@ -3509,7 +3655,7 @@ function arrayBufferToBase64(arrayBuffer) {
 const NOOP = () => { };
 const DEFAULT_WS_URL = 'wss://api.layercode.com/v1/agents/web/websocket';
 // SDK version - updated when publishing
-const SDK_VERSION = '2.1.
+const SDK_VERSION = '2.1.3';
 /**
  * @class LayercodeClient
  * @classdesc Core client for Layercode audio agent that manages audio recording, WebSocket communication, and speech processing.
@@ -3866,22 +4012,14 @@ class LayercodeClient {
     this.stopRecorderAmplitude = undefined;
   }
   /**
-   * Connects to the Layercode agent and starts the audio conversation
+   * Connects to the Layercode agent using the stored conversation ID and starts the audio conversation
    * @async
    * @returns {Promise<void>}
    */
-  async connect(
+  async connect() {
     if (this.status === 'connecting') {
       return;
     }
-    if (opts === null || opts === void 0 ? void 0 : opts.newConversation) {
-      this.options.conversationId = null;
-      this.conversationId = null;
-    }
-    else if (opts === null || opts === void 0 ? void 0 : opts.conversationId) {
-      this.options.conversationId = opts.conversationId;
-      this.conversationId = opts.conversationId;
-    }
     try {
       this._setStatus('connecting');
       // Reset turn tracking for clean start
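
Because connect() no longer takes options, call sites that forced a fresh conversation or injected an ID need updating. A hypothetical before/after (the 2.1.2 option names come from the removed lines above; 'conv_123' is illustrative):

    // 2.1.2:
    //   await client.connect({ newConversation: true });
    //   await client.connect({ conversationId: 'conv_123' });
    // 2.1.4: conversation identity lives on the client, so just:
    await client.connect();
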
@@ -3970,7 +4108,7 @@ class LayercodeClient {
     this.currentTurnId = null;
     console.debug('Reset turn tracking state');
   }
-  async disconnect(
+  async disconnect() {
     if (this.status === 'disconnected') {
       return;
     }
@@ -3982,7 +4120,7 @@ class LayercodeClient {
       this.ws.close();
       this.ws = null;
     }
-    await this._performDisconnectCleanup(
+    await this._performDisconnectCleanup();
   }
   /**
    * Gets the microphone MediaStream used by this client
@@ -4010,7 +4148,7 @@ class LayercodeClient {
         const newStream = this.wavRecorder.getStream();
         await this._reinitializeVAD(newStream);
       }
-      const reportedDeviceId = (_c = (_b = this.lastReportedDeviceId) !== null && _b !== void 0 ? _b : this.activeDeviceId) !== null && _c !== void 0 ? _c : (this.useSystemDefaultDevice ? 'default' :
+      const reportedDeviceId = (_c = (_b = this.lastReportedDeviceId) !== null && _b !== void 0 ? _b : this.activeDeviceId) !== null && _c !== void 0 ? _c : (this.useSystemDefaultDevice ? 'default' : normalizedDeviceId !== null && normalizedDeviceId !== void 0 ? normalizedDeviceId : 'default');
       console.debug(`Successfully switched to input device: ${reportedDeviceId}`);
     }
     catch (error) {
@@ -4047,7 +4185,7 @@ class LayercodeClient {
       this.recorderStarted = true;
       this._sendReadyIfNeeded();
     }
-    const reportedDeviceId = (_a = this.activeDeviceId) !== null && _a !== void 0 ? _a : (this.useSystemDefaultDevice ? 'default' : (
+    const reportedDeviceId = (_a = this.activeDeviceId) !== null && _a !== void 0 ? _a : (this.useSystemDefaultDevice ? 'default' : (_b = this.deviceId) !== null && _b !== void 0 ? _b : 'default');
     if (reportedDeviceId !== previousReportedDeviceId) {
       this.lastReportedDeviceId = reportedDeviceId;
       if (this.options.onDeviceSwitched) {
@@ -4129,7 +4267,10 @@ class LayercodeClient {
   _teardownDeviceListeners() {
     this.wavRecorder.listenForDeviceChange(null);
   }
-
+  /**
+   * Releases audio resources and listeners after a disconnect
+   */
+  async _performDisconnectCleanup() {
     var _a, _b;
     this.deviceId = null;
     this.activeDeviceId = null;
@@ -4149,13 +4290,7 @@ class LayercodeClient {
     (_b = (_a = this.wavPlayer).stop) === null || _b === void 0 ? void 0 : _b.call(_a);
     this.wavPlayer.disconnect();
     this._resetTurnTracking();
-
-      this.options.conversationId = null;
-      this.conversationId = null;
-    }
-    else {
-      this.options.conversationId = this.conversationId;
-    }
+    this.options.conversationId = this.conversationId;
     this.userAudioAmplitude = 0;
     this.agentAudioAmplitude = 0;
     this._setStatus('disconnected');
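
One behavioral consequence of this last change: the conversation ID now always survives a disconnect (previously one branch nulled it out), so a later connect() resumes the stored conversation. Sketch of the resulting usage (client construction omitted):

    await client.disconnect(); // cleanup keeps this.options.conversationId
    await client.connect();    // reconnects to the same conversation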