@omote/core 0.5.5 → 0.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1813,6 +1813,16 @@ function getModelCache() {
1813
1813
  return cacheInstance;
1814
1814
  }
1815
1815
  var MAX_CACHE_SIZE_BYTES = 500 * 1024 * 1024;
1816
/**
 * Fetch `url` with a hard timeout, optionally linked to a caller-supplied signal.
 * The request is aborted when either `timeoutMs` elapses or `signal` fires.
 *
 * @param {string} url - Resource to fetch.
 * @param {number} timeoutMs - Milliseconds before the request is aborted.
 * @param {AbortSignal} [signal] - Optional caller abort signal to propagate.
 * @returns {Promise<Response>} The fetch response; rejects with AbortError on timeout/abort.
 */
function fetchWithTimeout(url, timeoutMs, signal) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  const onCallerAbort = () => controller.abort();
  if (signal?.aborted) {
    // A listener added to an already-aborted AbortSignal never fires, so the
    // original code would let the request run until the timeout. Abort the
    // internal controller up front so the fetch rejects immediately.
    controller.abort();
  } else {
    signal?.addEventListener("abort", onCallerAbort, { once: true });
  }
  return fetch(url, { signal: controller.signal }).finally(() => {
    // Always release the timer and the listener, whether the fetch
    // resolved, rejected, or was aborted.
    clearTimeout(timer);
    signal?.removeEventListener("abort", onCallerAbort);
  });
}
1816
1826
  async function fetchWithCache(url, optionsOrProgress) {
1817
1827
  let options = {};
1818
1828
  if (typeof optionsOrProgress === "function") {
@@ -1866,61 +1876,84 @@ async function fetchWithCache(url, optionsOrProgress) {
1866
1876
  }
1867
1877
  span?.setAttributes({ "fetch.cache_hit": false });
1868
1878
  console.log(`[ModelCache] Cache miss, fetching: ${url}`);
1869
- try {
1870
- const response = await fetch(url);
1871
- if (!response.ok) {
1872
- throw new Error(`Failed to fetch ${url}: ${response.status}`);
1873
- }
1874
- const contentLength = response.headers.get("content-length");
1875
- const total = contentLength ? parseInt(contentLength, 10) : 0;
1876
- const etag = response.headers.get("etag") ?? void 0;
1877
- const tooLargeForCache = total > MAX_CACHE_SIZE_BYTES;
1878
- if (tooLargeForCache) {
1879
- console.log(`[ModelCache] File too large for IndexedDB (${(total / 1024 / 1024).toFixed(0)}MB > 500MB), using HTTP cache only`);
1880
- }
1881
- if (!response.body) {
1882
- const data2 = await response.arrayBuffer();
1879
+ const timeout = options.timeoutMs ?? 12e4;
1880
+ const maxRetries = options.maxRetries ?? 2;
1881
+ let lastError = null;
1882
+ for (let attempt = 0; attempt <= maxRetries; attempt++) {
1883
+ if (options.signal?.aborted) {
1884
+ throw new Error(`Fetch aborted for ${url}`);
1885
+ }
1886
+ if (attempt > 0) {
1887
+ const backoff = Math.min(2e3 * Math.pow(2, attempt - 1), 16e3);
1888
+ console.log(`[ModelCache] Retry ${attempt}/${maxRetries} after ${backoff}ms: ${url}`);
1889
+ await new Promise((r) => setTimeout(r, backoff));
1890
+ }
1891
+ try {
1892
+ const response = await fetchWithTimeout(url, timeout, options.signal);
1893
+ if (!response.ok) {
1894
+ throw new Error(`Failed to fetch ${url}: ${response.status}`);
1895
+ }
1896
+ const contentLength = response.headers.get("content-length");
1897
+ const total = contentLength ? parseInt(contentLength, 10) : 0;
1898
+ const etag = response.headers.get("etag") ?? void 0;
1899
+ const tooLargeForCache = total > MAX_CACHE_SIZE_BYTES;
1900
+ if (tooLargeForCache) {
1901
+ console.log(`[ModelCache] File too large for IndexedDB (${(total / 1024 / 1024).toFixed(0)}MB > 500MB), using HTTP cache only`);
1902
+ }
1903
+ if (!response.body) {
1904
+ const data2 = await response.arrayBuffer();
1905
+ if (!tooLargeForCache) {
1906
+ await cache.set(cacheKey, data2, etag, version);
1907
+ }
1908
+ span?.setAttributes({
1909
+ "fetch.size_bytes": data2.byteLength,
1910
+ "fetch.cached_to_indexeddb": !tooLargeForCache,
1911
+ ...attempt > 0 && { "fetch.retry_count": attempt }
1912
+ });
1913
+ span?.end();
1914
+ return data2;
1915
+ }
1916
+ const reader = response.body.getReader();
1917
+ const chunks = [];
1918
+ let loaded = 0;
1919
+ while (true) {
1920
+ const { done, value } = await reader.read();
1921
+ if (done) break;
1922
+ chunks.push(value);
1923
+ loaded += value.length;
1924
+ onProgress?.(loaded, total || loaded);
1925
+ }
1926
+ const data = new Uint8Array(loaded);
1927
+ let offset = 0;
1928
+ for (const chunk of chunks) {
1929
+ data.set(chunk, offset);
1930
+ offset += chunk.length;
1931
+ }
1932
+ const buffer = data.buffer;
1883
1933
  if (!tooLargeForCache) {
1884
- await cache.set(cacheKey, data2, etag, version);
1934
+ await cache.set(cacheKey, buffer, etag, version);
1935
+ console.log(`[ModelCache] Cached: ${url} (${(buffer.byteLength / 1024 / 1024).toFixed(1)}MB)`);
1885
1936
  }
1886
1937
  span?.setAttributes({
1887
- "fetch.size_bytes": data2.byteLength,
1888
- "fetch.cached_to_indexeddb": !tooLargeForCache
1938
+ "fetch.size_bytes": buffer.byteLength,
1939
+ "fetch.cached_to_indexeddb": !tooLargeForCache,
1940
+ ...attempt > 0 && { "fetch.retry_count": attempt }
1889
1941
  });
1890
1942
  span?.end();
1891
- return data2;
1892
- }
1893
- const reader = response.body.getReader();
1894
- const chunks = [];
1895
- let loaded = 0;
1896
- while (true) {
1897
- const { done, value } = await reader.read();
1898
- if (done) break;
1899
- chunks.push(value);
1900
- loaded += value.length;
1901
- onProgress?.(loaded, total || loaded);
1902
- }
1903
- const data = new Uint8Array(loaded);
1904
- let offset = 0;
1905
- for (const chunk of chunks) {
1906
- data.set(chunk, offset);
1907
- offset += chunk.length;
1908
- }
1909
- const buffer = data.buffer;
1910
- if (!tooLargeForCache) {
1911
- await cache.set(cacheKey, buffer, etag, version);
1912
- console.log(`[ModelCache] Cached: ${url} (${(buffer.byteLength / 1024 / 1024).toFixed(1)}MB)`);
1943
+ return buffer;
1944
+ } catch (error) {
1945
+ lastError = error instanceof Error ? error : new Error(String(error));
1946
+ if (options.signal?.aborted) {
1947
+ span?.endWithError(lastError);
1948
+ throw lastError;
1949
+ }
1950
+ if (attempt < maxRetries) {
1951
+ console.warn(`[ModelCache] Attempt ${attempt + 1} failed for ${url}: ${lastError.message}`);
1952
+ }
1913
1953
  }
1914
- span?.setAttributes({
1915
- "fetch.size_bytes": buffer.byteLength,
1916
- "fetch.cached_to_indexeddb": !tooLargeForCache
1917
- });
1918
- span?.end();
1919
- return buffer;
1920
- } catch (error) {
1921
- span?.endWithError(error instanceof Error ? error : new Error(String(error)));
1922
- throw error;
1923
1954
  }
1955
+ span?.endWithError(lastError);
1956
+ throw lastError;
1924
1957
  }
1925
1958
  async function preloadModels(urls, onProgress) {
1926
1959
  const cache = getModelCache();
@@ -2169,6 +2202,15 @@ function getSessionOptions(backend) {
2169
2202
  graphOptimizationLevel: "all"
2170
2203
  };
2171
2204
  }
2205
/**
 * Race `promise` against a timer: resolves/rejects with the promise's outcome
 * if it settles within `ms`, otherwise rejects with a labeled timeout error.
 *
 * @param {Promise<any>} promise - The operation to bound in time.
 * @param {number} ms - Timeout in milliseconds.
 * @param {string} label - Human-readable operation name for the error message.
 * @returns {Promise<any>}
 */
function withTimeout(promise, ms, label) {
  let timer;
  const deadline = new Promise((_resolve, reject) => {
    timer = setTimeout(
      () => reject(new Error(`${label} timed out after ${ms}ms`)),
      ms
    );
  });
  // Whichever settles first wins; the timer is cleared either way so it
  // cannot keep the event loop alive after the operation completes.
  return Promise.race([promise, deadline]).finally(() => clearTimeout(timer));
}
2172
2214
 
2173
2215
  // src/inference/blendshapeUtils.ts
2174
2216
  var LAM_BLENDSHAPES = [
@@ -2441,7 +2483,11 @@ var _Wav2Vec2Inference = class _Wav2Vec2Inference {
2441
2483
  )
2442
2484
  });
2443
2485
  try {
2444
- this.session = await this.ort.InferenceSession.create(modelUrl, sessionOptions);
2486
+ this.session = await withTimeout(
2487
+ this.ort.InferenceSession.create(modelUrl, sessionOptions),
2488
+ 18e4,
2489
+ "Wav2Vec2 InferenceSession.create (iOS URL pass-through)"
2490
+ );
2445
2491
  } catch (sessionErr) {
2446
2492
  logger3.error("iOS: InferenceSession.create() failed", {
2447
2493
  error: sessionErr instanceof Error ? sessionErr.message : String(sessionErr),
@@ -3529,9 +3575,10 @@ var _SenseVoiceInference = class _SenseVoiceInference {
3529
3575
  logger5.info("iOS: passing model URL directly to ORT (low-memory path)", {
3530
3576
  modelUrl: this.config.modelUrl
3531
3577
  });
3532
- this.session = await this.ort.InferenceSession.create(
3533
- this.config.modelUrl,
3534
- sessionOptions
3578
+ this.session = await withTimeout(
3579
+ this.ort.InferenceSession.create(this.config.modelUrl, sessionOptions),
3580
+ 18e4,
3581
+ "SenseVoice InferenceSession.create (iOS URL pass-through)"
3535
3582
  );
3536
3583
  } else {
3537
3584
  const cache = getModelCache();
@@ -3767,7 +3814,7 @@ var SenseVoiceInference = _SenseVoiceInference;
3767
3814
  // src/inference/SenseVoiceWorker.ts
3768
3815
  var logger6 = createLogger("SenseVoiceWorker");
3769
3816
  var WASM_CDN_PATH2 = "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/";
3770
- var LOAD_TIMEOUT_MS = 3e4;
3817
+ var LOAD_TIMEOUT_MS = 3e5;
3771
3818
  var INFERENCE_TIMEOUT_MS = 1e4;
3772
3819
  function resolveUrl(url) {
3773
3820
  if (/^https?:\/\//i.test(url) || /^blob:/i.test(url)) return url;
@@ -3784,6 +3831,12 @@ var WORKER_SCRIPT = `
3784
3831
  var ort = null;
3785
3832
  var session = null;
3786
3833
  var tokenMap = null;
3834
+
3835
+ function fetchWithTimeout(url, timeoutMs) {
3836
+ var controller = new AbortController();
3837
+ var timer = setTimeout(function() { controller.abort(); }, timeoutMs);
3838
+ return fetch(url, { signal: controller.signal }).finally(function() { clearTimeout(timer); });
3839
+ }
3787
3840
  var negMean = null;
3788
3841
  var invStddev = null;
3789
3842
  var languageId = 0;
@@ -4227,7 +4280,7 @@ async function loadOrt(wasmPaths) {
4227
4280
  var ortUrl = wasmPaths + 'ort.wasm.min.js';
4228
4281
 
4229
4282
  // Load the script by fetching and executing it
4230
- var response = await fetch(ortUrl);
4283
+ var response = await fetchWithTimeout(ortUrl, 30000);
4231
4284
  var scriptText = await response.text();
4232
4285
 
4233
4286
  // Create a blob URL for the script
@@ -4253,7 +4306,7 @@ async function loadOrt(wasmPaths) {
4253
4306
  */
4254
4307
  async function loadModel(modelUrl, tokensUrl, isIOSDevice, lang, textNorm) {
4255
4308
  // 1. Fetch and parse tokens.txt
4256
- var tokensResponse = await fetch(tokensUrl);
4309
+ var tokensResponse = await fetchWithTimeout(tokensUrl, 30000);
4257
4310
  if (!tokensResponse.ok) {
4258
4311
  throw new Error('Failed to fetch tokens.txt: ' + tokensResponse.status + ' ' + tokensResponse.statusText);
4259
4312
  }
@@ -4276,7 +4329,7 @@ async function loadModel(modelUrl, tokensUrl, isIOSDevice, lang, textNorm) {
4276
4329
  session = await ort.InferenceSession.create(modelUrl, sessionOptions);
4277
4330
  } else {
4278
4331
  // Desktop: fetch ArrayBuffer for potential caching
4279
- var modelResponse = await fetch(modelUrl);
4332
+ var modelResponse = await fetchWithTimeout(modelUrl, 120000);
4280
4333
  if (!modelResponse.ok) {
4281
4334
  throw new Error('Failed to fetch model: ' + modelResponse.status + ' ' + modelResponse.statusText);
4282
4335
  }
@@ -4761,12 +4814,12 @@ var SenseVoiceWorker = class {
4761
4814
  // src/inference/UnifiedInferenceWorker.ts
4762
4815
  var logger7 = createLogger("UnifiedInferenceWorker");
4763
4816
  var WASM_CDN_PATH3 = "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/";
4764
- var INIT_TIMEOUT_MS = 15e3;
4765
- var SV_LOAD_TIMEOUT_MS = 3e4;
4817
+ var INIT_TIMEOUT_MS = 6e4;
4818
+ var SV_LOAD_TIMEOUT_MS = 3e5;
4766
4819
  var SV_INFER_TIMEOUT_MS = 1e4;
4767
- var CPU_LOAD_TIMEOUT_MS = 6e4;
4820
+ var CPU_LOAD_TIMEOUT_MS = 42e4;
4768
4821
  var CPU_INFER_TIMEOUT_MS = 5e3;
4769
- var VAD_LOAD_TIMEOUT_MS = 1e4;
4822
+ var VAD_LOAD_TIMEOUT_MS = 12e4;
4770
4823
  var VAD_INFER_TIMEOUT_MS = 1e3;
4771
4824
  var DISPOSE_TIMEOUT_MS = 5e3;
4772
4825
  function resolveUrl2(url) {
@@ -4787,6 +4840,12 @@ var WORKER_SCRIPT2 = `
4787
4840
 
4788
4841
  var ort = null;
4789
4842
 
4843
+ function fetchWithTimeout(url, timeoutMs) {
4844
+ var controller = new AbortController();
4845
+ var timer = setTimeout(function() { controller.abort(); }, timeoutMs);
4846
+ return fetch(url, { signal: controller.signal }).finally(function() { clearTimeout(timer); });
4847
+ }
4848
+
4790
4849
  // SenseVoice state
4791
4850
  var svSession = null;
4792
4851
  var svTokenMap = null;
@@ -5101,7 +5160,7 @@ function symmetrizeBlendshapes(frame) {
5101
5160
  async function loadOrt(wasmPaths, isIOSDevice) {
5102
5161
  if (ort) return;
5103
5162
  var ortUrl = wasmPaths + 'ort.wasm.min.js';
5104
- var response = await fetch(ortUrl);
5163
+ var response = await fetchWithTimeout(ortUrl, 30000);
5105
5164
  var scriptText = await response.text();
5106
5165
  var blob = new Blob([scriptText], { type: 'application/javascript' });
5107
5166
  var blobUrl = URL.createObjectURL(blob);
@@ -5119,7 +5178,7 @@ async function loadOrt(wasmPaths, isIOSDevice) {
5119
5178
  // \u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
5120
5179
 
5121
5180
  async function svLoad(msg) {
5122
- var tokensResponse = await fetch(msg.tokensUrl);
5181
+ var tokensResponse = await fetchWithTimeout(msg.tokensUrl, 30000);
5123
5182
  if (!tokensResponse.ok) throw new Error('Failed to fetch tokens.txt: ' + tokensResponse.status);
5124
5183
  var tokensText = await tokensResponse.text();
5125
5184
  svTokenMap = parseTokensFile(tokensText);
@@ -5130,7 +5189,7 @@ async function svLoad(msg) {
5130
5189
  if (msg.isIOS) {
5131
5190
  svSession = await ort.InferenceSession.create(msg.modelUrl, sessionOptions);
5132
5191
  } else {
5133
- var modelResponse = await fetch(msg.modelUrl);
5192
+ var modelResponse = await fetchWithTimeout(msg.modelUrl, 120000);
5134
5193
  if (!modelResponse.ok) throw new Error('Failed to fetch model: ' + modelResponse.status);
5135
5194
  var modelBuffer = await modelResponse.arrayBuffer();
5136
5195
  svSession = await ort.InferenceSession.create(new Uint8Array(modelBuffer), sessionOptions);
@@ -5205,11 +5264,11 @@ async function cpuLoad(msg) {
5205
5264
  }
5206
5265
  cpuSession = await ort.InferenceSession.create(msg.modelUrl, sessionOptions);
5207
5266
  } else {
5208
- var graphResponse = await fetch(msg.modelUrl);
5267
+ var graphResponse = await fetchWithTimeout(msg.modelUrl, 120000);
5209
5268
  if (!graphResponse.ok) throw new Error('Failed to fetch model graph: ' + graphResponse.status);
5210
5269
  var graphBuffer = await graphResponse.arrayBuffer();
5211
5270
  if (msg.externalDataUrl && dataFilename) {
5212
- var dataResponse = await fetch(msg.externalDataUrl);
5271
+ var dataResponse = await fetchWithTimeout(msg.externalDataUrl, 120000);
5213
5272
  if (!dataResponse.ok) throw new Error('Failed to fetch external data: ' + dataResponse.status);
5214
5273
  var dataBuffer = await dataResponse.arrayBuffer();
5215
5274
  sessionOptions.externalData = [{ path: dataFilename, data: new Uint8Array(dataBuffer) }];
@@ -5261,7 +5320,7 @@ async function vadLoad(msg) {
5261
5320
  vadChunkSize = vadSampleRate === 16000 ? 512 : 256;
5262
5321
  vadContextSize = vadSampleRate === 16000 ? 64 : 32;
5263
5322
 
5264
- var response = await fetch(msg.modelUrl);
5323
+ var response = await fetchWithTimeout(msg.modelUrl, 60000);
5265
5324
  if (!response.ok) throw new Error('Failed to fetch VAD model: ' + response.status);
5266
5325
  var modelBuffer = await response.arrayBuffer();
5267
5326
  vadSession = await ort.InferenceSession.create(new Uint8Array(modelBuffer), {
@@ -6109,7 +6168,11 @@ var _Wav2ArkitCpuInference = class _Wav2ArkitCpuInference {
6109
6168
  // URL string — ORT fetches directly into WASM
6110
6169
  }];
6111
6170
  }
6112
- this.session = await this.ort.InferenceSession.create(modelUrl, sessionOptions);
6171
+ this.session = await withTimeout(
6172
+ this.ort.InferenceSession.create(modelUrl, sessionOptions),
6173
+ 18e4,
6174
+ "Wav2ArkitCpu InferenceSession.create (iOS URL pass-through)"
6175
+ );
6113
6176
  } else {
6114
6177
  const cache = getModelCache();
6115
6178
  const isCached = await cache.has(modelUrl);
@@ -6360,7 +6423,7 @@ var Wav2ArkitCpuInference = _Wav2ArkitCpuInference;
6360
6423
  // src/inference/Wav2ArkitCpuWorker.ts
6361
6424
  var logger10 = createLogger("Wav2ArkitCpuWorker");
6362
6425
  var WASM_CDN_PATH4 = "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/";
6363
- var LOAD_TIMEOUT_MS2 = 6e4;
6426
+ var LOAD_TIMEOUT_MS2 = 42e4;
6364
6427
  var INFERENCE_TIMEOUT_MS2 = 5e3;
6365
6428
  function resolveUrl3(url) {
6366
6429
  if (/^https?:\/\//i.test(url) || /^blob:/i.test(url)) return url;
@@ -6377,6 +6440,12 @@ var WORKER_SCRIPT3 = `
6377
6440
  var ort = null;
6378
6441
  var session = null;
6379
6442
 
6443
+ function fetchWithTimeout(url, timeoutMs) {
6444
+ var controller = new AbortController();
6445
+ var timer = setTimeout(function() { controller.abort(); }, timeoutMs);
6446
+ return fetch(url, { signal: controller.signal }).finally(function() { clearTimeout(timer); });
6447
+ }
6448
+
6380
6449
  // Precomputed symmetric index pairs from LAM_BLENDSHAPES alphabetical ordering
6381
6450
  // Used to average left/right blendshape pairs for symmetrized output
6382
6451
  const SYMMETRIC_INDEX_PAIRS = [
@@ -6426,7 +6495,7 @@ async function loadOrt(wasmPaths) {
6426
6495
  const ortUrl = wasmPaths + 'ort.wasm.min.js';
6427
6496
 
6428
6497
  // Load the script by fetching and executing it
6429
- const response = await fetch(ortUrl);
6498
+ const response = await fetchWithTimeout(ortUrl, 30000);
6430
6499
  const scriptText = await response.text();
6431
6500
 
6432
6501
  // Create a blob URL for the script
@@ -6468,7 +6537,7 @@ async function loadModel(modelUrl, externalDataUrl, isIOS) {
6468
6537
  session = await ort.InferenceSession.create(modelUrl, sessionOptions);
6469
6538
  } else {
6470
6539
  // Desktop: fetch model graph as ArrayBuffer
6471
- const graphResponse = await fetch(modelUrl);
6540
+ const graphResponse = await fetchWithTimeout(modelUrl, 120000);
6472
6541
  if (!graphResponse.ok) {
6473
6542
  throw new Error('Failed to fetch model graph: ' + graphResponse.status + ' ' + graphResponse.statusText);
6474
6543
  }
@@ -6476,7 +6545,7 @@ async function loadModel(modelUrl, externalDataUrl, isIOS) {
6476
6545
 
6477
6546
  // Fetch external data file if present
6478
6547
  if (externalDataUrl && dataFilename) {
6479
- const dataResponse = await fetch(externalDataUrl);
6548
+ const dataResponse = await fetchWithTimeout(externalDataUrl, 120000);
6480
6549
  if (!dataResponse.ok) {
6481
6550
  throw new Error('Failed to fetch external data: ' + dataResponse.status + ' ' + dataResponse.statusText);
6482
6551
  }
@@ -7595,7 +7664,7 @@ SileroVADInference.isWebGPUAvailable = isWebGPUAvailable;
7595
7664
  // src/inference/SileroVADWorker.ts
7596
7665
  var logger13 = createLogger("SileroVADWorker");
7597
7666
  var WASM_CDN_PATH5 = "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/";
7598
- var LOAD_TIMEOUT_MS3 = 1e4;
7667
+ var LOAD_TIMEOUT_MS3 = 12e4;
7599
7668
  var INFERENCE_TIMEOUT_MS3 = 1e3;
7600
7669
  function resolveUrl4(url) {
7601
7670
  if (/^https?:\/\//i.test(url) || /^blob:/i.test(url)) return url;
@@ -7611,6 +7680,13 @@ var WORKER_SCRIPT4 = `
7611
7680
 
7612
7681
  var ort = null;
7613
7682
  var session = null;
7683
+
7684
+ function fetchWithTimeout(url, timeoutMs) {
7685
+ var controller = new AbortController();
7686
+ var timer = setTimeout(function() { controller.abort(); }, timeoutMs);
7687
+ return fetch(url, { signal: controller.signal }).finally(function() { clearTimeout(timer); });
7688
+ }
7689
+
7614
7690
  var sampleRate = 16000;
7615
7691
  var chunkSize = 512;
7616
7692
  var contextSize = 64;
@@ -7626,7 +7702,7 @@ async function loadOrt(wasmPaths) {
7626
7702
  const ortUrl = wasmPaths + 'ort.wasm.min.js';
7627
7703
 
7628
7704
  // Load the script by fetching and executing it
7629
- const response = await fetch(ortUrl);
7705
+ const response = await fetchWithTimeout(ortUrl, 30000);
7630
7706
  const scriptText = await response.text();
7631
7707
 
7632
7708
  // Create a blob URL for the script
@@ -7656,7 +7732,7 @@ async function loadModel(modelUrl, sr) {
7656
7732
  contextSize = sr === 16000 ? 64 : 32;
7657
7733
 
7658
7734
  // Fetch model data
7659
- const response = await fetch(modelUrl);
7735
+ const response = await fetchWithTimeout(modelUrl, 60000);
7660
7736
  if (!response.ok) {
7661
7737
  throw new Error('Failed to fetch model: ' + response.status + ' ' + response.statusText);
7662
7738
  }