@beltoinc/slyos-sdk 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,1856 @@
1
+ import axios from 'axios';
2
+ import { pipeline, env } from '@huggingface/transformers';
3
+
4
// @ts-ignore - Force CPU in Node.js
// Disable the ONNX WASM worker proxy so inference runs on the calling
// thread. NOTE(review): presumably avoids spawning web workers, which are
// unavailable or fragile in some Node.js setups — confirm against
// @huggingface/transformers docs. (`proxy` is not in the published
// typings, hence the ts-ignore above.)
if (env.backends?.onnx?.wasm) {
  env.backends.onnx.wasm.proxy = false;
}
8
+
9
// ─── Types ──────────────────────────────────────────────────────────

/** Options accepted by the SlyOS constructor. */
interface SlyOSConfig {
  apiKey: string;                // SDK key exchanged for a bearer token via /api/auth/sdk
  apiUrl?: string;               // override for the SlyOS API base URL
  onProgress?: ProgressCallback; // coarse progress reporting (e.g. for UI progress bars)
  onEvent?: EventCallback;       // fine-grained lifecycle events
}

/** Sampling controls for text generation. */
interface GenerateOptions {
  temperature?: number;
  maxTokens?: number;
  topP?: number;
}

/** Controls for speech-to-text transcription. */
interface TranscribeOptions {
  language?: string;
  returnTimestamps?: boolean;
}

type ModelCategory = 'llm' | 'stt';
type QuantizationLevel = 'q4' | 'q8' | 'fp16' | 'fp32';

/** Registry entry describing one curated model (see modelMap below). */
interface ModelInfo {
  hfModel: string;   // Hugging Face repo id backing this model
  task: string;      // transformers.js pipeline task name
  category: ModelCategory;
  sizesMB: Record<QuantizationLevel, number>;   // approximate download size per quant level
  minRAM_MB: Record<QuantizationLevel, number>; // minimum device RAM per quant level
}

/** Snapshot of the host device's hardware and runtime capabilities. */
interface DeviceProfile {
  cpuCores: number;
  memoryMB: number;
  estimatedStorageMB: number;
  platform: 'web' | 'nodejs';
  os: string;                           // OS name+release (Node) or full userAgent (browser)
  recommendedQuant: QuantizationLevel;  // baseline quant chosen from memory
  maxContextWindow: number;             // tokens; derived from memory + quant
  // Enhanced device intelligence fields
  deviceFingerprint?: string;           // 32-hex-char SHA-256 of stable hardware traits
  gpuRenderer?: string;                 // WebGL UNMASKED_RENDERER_WEBGL string (browser only)
  gpuVramMb?: number;                   // rough heuristic estimate, not exact
  screenWidth?: number;
  screenHeight?: number;
  pixelRatio?: number;
  browserName?: string;
  browserVersion?: string;
  networkType?: string;                 // Network Information API effectiveType, if available
  latencyToApiMs?: number;              // round-trip to /api/health
  timezone?: string;                    // IANA zone from Intl.DateTimeFormat
  wasmAvailable?: boolean;
  webgpuAvailable?: boolean;
}

/** Coarse progress notification delivered to the onProgress callback. */
interface ProgressEvent {
  stage: 'initializing' | 'profiling' | 'downloading' | 'loading' | 'ready' | 'generating' | 'transcribing' | 'error';
  progress: number; // 0-100
  message: string;
  detail?: any;
}

/** Discrete lifecycle event delivered to the onEvent callback. */
interface SlyEvent {
  type: 'auth' | 'device_registered' | 'device_profiled' | 'model_download_start' | 'model_download_progress' | 'model_loaded' | 'inference_start' | 'inference_complete' | 'error' | 'fallback_success' | 'fallback_error' | 'telemetry_flushed';
  data?: any;
  timestamp: number; // epoch milliseconds at emission time
}

type ProgressCallback = (event: ProgressEvent) => void;
type EventCallback = (event: SlyEvent) => void;
79
+
80
// ─── OpenAI Compatibility Types ──────────────────────────────────────

/** Single chat message in the OpenAI wire format. */
interface OpenAIMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

/** Request body mirroring OpenAI's /chat/completions schema. */
interface OpenAIChatCompletionRequest {
  messages: OpenAIMessage[];
  temperature?: number;
  top_p?: number;
  max_tokens?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
  stop?: string | string[];
}

/** One completion choice in an OpenAI-style response. */
interface OpenAIChoice {
  index: number;
  message: OpenAIMessage;
  finish_reason: string;
}

/** Token accounting block in an OpenAI-style response. */
interface OpenAIUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

/** Response body mirroring OpenAI's chat.completion object. */
interface OpenAIChatCompletionResponse {
  id: string;
  object: 'chat.completion';
  created: number; // epoch seconds, per the OpenAI convention
  model: string;
  choices: OpenAIChoice[];
  usage: OpenAIUsage;
}

// ─── AWS Bedrock Compatibility Types ─────────────────────────────────

/** Generation knobs in the Bedrock (Titan-style) request schema. */
interface BedrockTextGenerationConfig {
  maxTokenCount?: number;
  temperature?: number;
  topP?: number;
  topK?: number;
  stopSequences?: string[];
}

/** Request body for a Bedrock InvokeModel-style call. */
interface BedrockInvokeRequest {
  inputText: string;
  textGenerationConfig?: BedrockTextGenerationConfig;
}

/** One generation result in a Bedrock-style response. */
interface BedrockResult {
  outputText: string;
  tokenCount: number;
}

/** Response body for a Bedrock InvokeModel-style call. */
interface BedrockInvokeResponse {
  results: BedrockResult[];
  input_text_token_count?: number;
}

// ─── Fallback Configuration ─────────────────────────────────────────

type FallbackProvider = 'openai' | 'bedrock';

/** Cloud provider to fall back to when on-device inference is unavailable. */
interface FallbackConfig {
  provider: FallbackProvider;
  apiKey: string;
  model: string;
  region?: string; // for Bedrock
}

/** Constructor config extended with an optional cloud fallback. */
interface SlyOSConfigWithFallback extends SlyOSConfig {
  fallback?: FallbackConfig;
}

// ─── OpenAI Compatible Client ───────────────────────────────────────

/** Minimal structural subset of the official OpenAI client surface. */
interface OpenAICompatibleClient {
  chat: {
    completions: {
      create(request: OpenAIChatCompletionRequest & { model: string }): Promise<OpenAIChatCompletionResponse>;
    };
  };
}
167
+
168
// ─── RAG Types ──────────────────────────────────────────────────

/** Parameters for a retrieval-augmented generation query. */
interface RAGOptions {
  knowledgeBaseId: string;
  query: string;
  topK?: number;       // number of chunks to retrieve
  modelId: string;     // model used for the generation step
  temperature?: number;
  maxTokens?: number;
}

/** One retrieved document chunk with its similarity score. */
interface RAGChunk {
  id: string;
  documentId: string;
  documentName: string;
  content: string;
  similarityScore: number;
  metadata?: Record<string, any>;
}

/** Full result of a RAG query: retrieval + generation. */
interface RAGResponse {
  query: string;
  retrievedChunks: RAGChunk[];
  generatedResponse: string;
  context: string;   // concatenated chunk text fed to the model
  latencyMs: number;
  tierUsed: 1 | 2 | 3;
}

/**
 * Snapshot of a knowledge base synced for offline retrieval.
 * NOTE(review): field semantics inferred from names — confirm against the
 * server's sync endpoint (e.g. whether `embedding` is null when the chunk
 * has not been embedded yet).
 */
interface OfflineIndex {
  metadata: {
    kb_id: string;
    kb_name: string;
    chunk_size: number;
    embedding_dim: number;
    total_chunks: number;
    synced_at: string;   // ISO timestamps, presumably — verify
    expires_at: string;
    sync_token: string;
  };
  chunks: Array<{
    id: string;
    document_id: string;
    document_name: string;
    content: string;
    chunk_index: number;
    embedding: number[] | null;
    metadata: Record<string, any>;
  }>;
}
218
+
219
// ─── Model Registry ─────────────────────────────────────────────────

// Curated catalog mapping SlyOS model ids to their Hugging Face sources.
// sizesMB  = approximate download size per quantization level;
// minRAM_MB = minimum device memory required to run at that level.
// Values are hand-tuned estimates, not measured per-device figures.
const modelMap: Record<string, ModelInfo> = {
  // LLM models (1B+)
  'quantum-1.7b': {
    hfModel: 'HuggingFaceTB/SmolLM2-1.7B-Instruct',
    task: 'text-generation',
    category: 'llm',
    sizesMB: { q4: 900, q8: 1700, fp16: 3400, fp32: 6800 },
    minRAM_MB: { q4: 2048, q8: 3072, fp16: 5120, fp32: 8192 },
  },
  'quantum-3b': {
    hfModel: 'Qwen/Qwen2.5-3B-Instruct',
    task: 'text-generation',
    category: 'llm',
    sizesMB: { q4: 1600, q8: 3200, fp16: 6400, fp32: 12800 },
    minRAM_MB: { q4: 3072, q8: 5120, fp16: 8192, fp32: 16384 },
  },
  'quantum-code-3b': {
    hfModel: 'Qwen/Qwen2.5-Coder-3B-Instruct',
    task: 'text-generation',
    category: 'llm',
    sizesMB: { q4: 1600, q8: 3200, fp16: 6400, fp32: 12800 },
    minRAM_MB: { q4: 3072, q8: 5120, fp16: 8192, fp32: 16384 },
  },
  'quantum-8b': {
    hfModel: 'Qwen/Qwen2.5-7B-Instruct',
    task: 'text-generation',
    category: 'llm',
    sizesMB: { q4: 4200, q8: 8400, fp16: 16800, fp32: 33600 },
    minRAM_MB: { q4: 6144, q8: 10240, fp16: 20480, fp32: 40960 },
  },
  // STT models
  'voicecore-base': {
    hfModel: 'onnx-community/whisper-base',
    task: 'automatic-speech-recognition',
    category: 'stt',
    sizesMB: { q4: 40, q8: 75, fp16: 150, fp32: 300 },
    minRAM_MB: { q4: 512, q8: 512, fp16: 1024, fp32: 2048 },
  },
  'voicecore-small': {
    hfModel: 'onnx-community/whisper-small',
    task: 'automatic-speech-recognition',
    category: 'stt',
    sizesMB: { q4: 100, q8: 200, fp16: 400, fp32: 800 },
    minRAM_MB: { q4: 1024, q8: 1024, fp16: 2048, fp32: 4096 },
  },
};
267
+
268
+ // ─── Context Window Sizing ──────────────────────────────────────────
269
+
270
+ function recommendContextWindow(memoryMB: number, quant: QuantizationLevel): number {
271
+ // More RAM + smaller quant = larger context window
272
+ const base = quant === 'q4' ? 1024 : quant === 'q8' ? 2048 : quant === 'fp16' ? 4096 : 8192;
273
+
274
+ if (memoryMB >= 16384) return Math.min(base * 4, 32768);
275
+ if (memoryMB >= 8192) return Math.min(base * 2, 16384);
276
+ if (memoryMB >= 4096) return base;
277
+ return Math.max(512, Math.floor(base / 2));
278
+ }
279
+
280
+ function selectQuantization(memoryMB: number, modelId: string): QuantizationLevel {
281
+ const info = modelMap[modelId];
282
+ if (!info) return 'q4';
283
+
284
+ // ONNX/WASM has protobuf size limits — fp16 files >2GB crash on many systems.
285
+ // For LLMs, cap at q4 via WASM. FP16/Q8 need native backends (llama.cpp).
286
+ // STT models are small enough for q8/fp16.
287
+ if (info.category === 'llm') {
288
+ return 'q4'; // safest for ONNX/WASM across all platforms
289
+ }
290
+
291
+ // STT models: try from best quality down
292
+ const quants: QuantizationLevel[] = ['fp16', 'q8', 'q4'];
293
+ for (const q of quants) {
294
+ if (memoryMB >= info.minRAM_MB[q]) return q;
295
+ }
296
+ return 'q4'; // fallback
297
+ }
298
+
299
+ // ─── Context Window Detection ──────────────────────────────────────
300
+
301
+ async function detectContextWindowFromHF(hfModelId: string): Promise<number> {
302
+ try {
303
+ const configUrl = `https://huggingface.co/${hfModelId}/raw/main/config.json`;
304
+ const response = await axios.get(configUrl, { timeout: 5000 });
305
+ const config = response.data;
306
+
307
+ // Try multiple context window field names
308
+ const contextWindow =
309
+ config.max_position_embeddings ||
310
+ config.n_positions ||
311
+ config.max_seq_len ||
312
+ config.model_max_length ||
313
+ 2048;
314
+
315
+ return contextWindow;
316
+ } catch {
317
+ // Default if config cannot be fetched
318
+ return 2048;
319
+ }
320
+ }
321
+
322
+ // ─── SDK Version ────────────────────────────────────────────────────
323
+ const SDK_VERSION = '1.4.1';
324
+
325
+ // ─── Persistent Device Identity ─────────────────────────────────────
326
+
327
+ async function hashString(str: string): Promise<string> {
328
+ const isNode = typeof window === 'undefined';
329
+ if (isNode) {
330
+ const crypto = await import('crypto');
331
+ return crypto.createHash('sha256').update(str).digest('hex').substring(0, 32);
332
+ } else {
333
+ const encoder = new TextEncoder();
334
+ const data = encoder.encode(str);
335
+ const hashBuffer = await crypto.subtle.digest('SHA-256', data);
336
+ return Array.from(new Uint8Array(hashBuffer))
337
+ .map(b => b.toString(16).padStart(2, '0'))
338
+ .join('')
339
+ .substring(0, 32);
340
+ }
341
+ }
342
+
343
+ async function getOrCreateDeviceId(): Promise<string> {
344
+ const isNode = typeof window === 'undefined';
345
+
346
+ if (isNode) {
347
+ // Node.js: persist in ~/.slyos/device-id
348
+ try {
349
+ const fs = await import('fs');
350
+ const path = await import('path');
351
+ const os = await import('os');
352
+ const slyosDir = path.join(os.homedir(), '.slyos');
353
+ const idFile = path.join(slyosDir, 'device-id');
354
+
355
+ try {
356
+ const existing = fs.readFileSync(idFile, 'utf-8').trim();
357
+ if (existing) return existing;
358
+ } catch {}
359
+
360
+ const deviceId = `device-${Date.now()}-${Math.random().toString(36).substr(2, 12)}`;
361
+ fs.mkdirSync(slyosDir, { recursive: true });
362
+ fs.writeFileSync(idFile, deviceId);
363
+ return deviceId;
364
+ } catch {
365
+ return `device-${Date.now()}-${Math.random().toString(36).substr(2, 12)}`;
366
+ }
367
+ } else {
368
+ // Browser: persist in localStorage
369
+ const key = 'slyos_device_id';
370
+ try {
371
+ const existing = localStorage.getItem(key);
372
+ if (existing) return existing;
373
+ } catch {}
374
+
375
+ const deviceId = `device-${Date.now()}-${Math.random().toString(36).substr(2, 12)}`;
376
+ try { localStorage.setItem(key, deviceId); } catch {}
377
+ return deviceId;
378
+ }
379
+ }
380
+
381
+ async function generateDeviceFingerprint(): Promise<string> {
382
+ const isNode = typeof window === 'undefined';
383
+ let components: string[] = [];
384
+
385
+ if (isNode) {
386
+ try {
387
+ const os = await import('os');
388
+ const cpus = os.cpus();
389
+ components.push(cpus[0]?.model || 'unknown-cpu');
390
+ components.push(String(os.totalmem()));
391
+ components.push(os.platform());
392
+ components.push(os.arch());
393
+ components.push(String(cpus.length));
394
+ } catch {}
395
+ } else {
396
+ components.push(String(navigator.hardwareConcurrency || 0));
397
+ components.push(String((navigator as any).deviceMemory || 0));
398
+ components.push(navigator.platform || 'unknown');
399
+ // WebGL renderer for GPU fingerprint
400
+ try {
401
+ const canvas = document.createElement('canvas');
402
+ const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl') as WebGLRenderingContext | null;
403
+ if (gl) {
404
+ const ext = gl.getExtension('WEBGL_debug_renderer_info');
405
+ if (ext) {
406
+ components.push(gl.getParameter(ext.UNMASKED_RENDERER_WEBGL) || 'unknown-gpu');
407
+ }
408
+ }
409
+ } catch {}
410
+ components.push(String(screen.width || 0));
411
+ components.push(String(screen.height || 0));
412
+ }
413
+
414
+ return await hashString(components.join('|'));
415
+ }
416
+
417
+ // ─── Enhanced Device Profiling ──────────────────────────────────────
418
+
419
+ function detectGPU(): { renderer: string | null; vramMb: number } {
420
+ if (typeof window === 'undefined') return { renderer: null, vramMb: 0 };
421
+ try {
422
+ const canvas = document.createElement('canvas');
423
+ const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl') as WebGLRenderingContext | null;
424
+ if (!gl) return { renderer: null, vramMb: 0 };
425
+ const ext = gl.getExtension('WEBGL_debug_renderer_info');
426
+ const renderer = ext ? gl.getParameter(ext.UNMASKED_RENDERER_WEBGL) : null;
427
+ // Rough VRAM estimate from renderer string
428
+ let vramMb = 0;
429
+ if (renderer) {
430
+ const match = renderer.match(/(\d+)\s*MB/i);
431
+ if (match) vramMb = parseInt(match[1]);
432
+ else if (/RTX\s*40/i.test(renderer)) vramMb = 8192;
433
+ else if (/RTX\s*30/i.test(renderer)) vramMb = 6144;
434
+ else if (/GTX/i.test(renderer)) vramMb = 4096;
435
+ else if (/Apple M[2-4]/i.test(renderer)) vramMb = 8192;
436
+ else if (/Apple M1/i.test(renderer)) vramMb = 4096;
437
+ else if (/Intel/i.test(renderer)) vramMb = 1024;
438
+ }
439
+ return { renderer, vramMb };
440
+ } catch {
441
+ return { renderer: null, vramMb: 0 };
442
+ }
443
+ }
444
+
445
+ function detectBrowser(): { name: string; version: string } {
446
+ if (typeof window === 'undefined' || typeof navigator === 'undefined') return { name: 'node', version: process.version || 'unknown' };
447
+ const ua = navigator.userAgent;
448
+ if (/Edg\//i.test(ua)) { const m = ua.match(/Edg\/([\d.]+)/); return { name: 'Edge', version: m?.[1] || '' }; }
449
+ if (/Chrome\//i.test(ua)) { const m = ua.match(/Chrome\/([\d.]+)/); return { name: 'Chrome', version: m?.[1] || '' }; }
450
+ if (/Firefox\//i.test(ua)) { const m = ua.match(/Firefox\/([\d.]+)/); return { name: 'Firefox', version: m?.[1] || '' }; }
451
+ if (/Safari\//i.test(ua)) { const m = ua.match(/Version\/([\d.]+)/); return { name: 'Safari', version: m?.[1] || '' }; }
452
+ return { name: 'unknown', version: '' };
453
+ }
454
+
455
+ function detectNetworkType(): string {
456
+ if (typeof navigator === 'undefined') return 'unknown';
457
+ const conn = (navigator as any).connection || (navigator as any).mozConnection || (navigator as any).webkitConnection;
458
+ if (!conn) return 'unknown';
459
+ return conn.effectiveType || conn.type || 'unknown';
460
+ }
461
+
462
+ async function measureApiLatency(apiUrl: string): Promise<number> {
463
+ try {
464
+ const start = Date.now();
465
+ await axios.head(`${apiUrl}/api/health`, { timeout: 5000 });
466
+ return Date.now() - start;
467
+ } catch {
468
+ try {
469
+ const start = Date.now();
470
+ await axios.get(`${apiUrl}/api/health`, { timeout: 5000 });
471
+ return Date.now() - start;
472
+ } catch {
473
+ return -1;
474
+ }
475
+ }
476
+ }
477
+
478
// ─── Device Profiling ───────────────────────────────────────────────

/**
 * Builds a full DeviceProfile for the current environment.
 * Node.js probes the os module plus a `df` shell-out for free disk;
 * browsers use navigator hints and the Storage Manager API. Every probe
 * is individually fault-tolerant — failures fall back to the defaults
 * declared at the top (4 cores, 4GB RAM, ~10GB storage).
 */
async function profileDevice(): Promise<DeviceProfile> {
  const isNode = typeof window === 'undefined';
  // Conservative defaults used when probing fails.
  let cpuCores = 4;
  let memoryMB = 4096;
  let estimatedStorageMB = 10000;
  let platform: 'web' | 'nodejs' = isNode ? 'nodejs' : 'web';
  let os = 'unknown';

  if (isNode) {
    // Node.js environment
    try {
      const osModule = await import('os');
      cpuCores = osModule.cpus().length;
      memoryMB = Math.round(osModule.totalmem() / (1024 * 1024));
      os = `${osModule.platform()} ${osModule.release()}`;

      // Estimate free disk via df-like check. NOTE(review): `df` is absent
      // on Windows; the inner catch keeps the 10GB default there.
      try {
        const { execSync } = await import('child_process');
        const dfOutput = execSync('df -m . 2>/dev/null || echo "0 0 0 0"', { encoding: 'utf-8' });
        const lines = dfOutput.trim().split('\n');
        if (lines.length > 1) {
          const parts = lines[1].split(/\s+/);
          estimatedStorageMB = parseInt(parts[3]) || 10000; // Available column
        }
      } catch {
        estimatedStorageMB = 10000;
      }
    } catch {
      // Fallback
    }
  } else {
    // Browser environment
    cpuCores = navigator.hardwareConcurrency || 4;
    memoryMB = ((navigator as any).deviceMemory || 4) * 1024; // deviceMemory is in GB
    os = navigator.userAgent;

    // Storage Manager API (Chrome 61+)
    try {
      if (navigator.storage && navigator.storage.estimate) {
        const estimate = await navigator.storage.estimate();
        estimatedStorageMB = Math.round((estimate.quota || 0) / (1024 * 1024));
      }
    } catch {
      estimatedStorageMB = 5000;
    }
  }

  // Baseline quant/context derived from memory using the smallest LLM.
  const recommendedQuant = selectQuantization(memoryMB, 'quantum-1.7b'); // default baseline
  const maxContextWindow = recommendContextWindow(memoryMB, recommendedQuant);

  // Enhanced profiling (each helper is no-op-safe in Node).
  const gpu = detectGPU();
  const browser = detectBrowser();
  const networkType = detectNetworkType();
  const timezone = Intl?.DateTimeFormat?.()?.resolvedOptions?.()?.timeZone || 'unknown';

  let screenWidth = 0, screenHeight = 0, pixelRatio = 0;
  let wasmAvailable = false, webgpuAvailable = false;

  if (!isNode) {
    screenWidth = screen?.width || 0;
    screenHeight = screen?.height || 0;
    pixelRatio = window?.devicePixelRatio || 1;
  }

  // Capability detection
  try { wasmAvailable = typeof WebAssembly !== 'undefined'; } catch {}
  if (!isNode) {
    try { webgpuAvailable = !!(navigator as any).gpu; } catch {}
  }

  // `x || undefined` drops zero/empty placeholders so optional fields
  // are simply absent rather than carrying meaningless zeros.
  return {
    cpuCores,
    memoryMB,
    estimatedStorageMB,
    platform,
    os,
    recommendedQuant,
    maxContextWindow,
    gpuRenderer: gpu.renderer || undefined,
    gpuVramMb: gpu.vramMb || undefined,
    screenWidth: screenWidth || undefined,
    screenHeight: screenHeight || undefined,
    pixelRatio: pixelRatio || undefined,
    browserName: browser.name,
    browserVersion: browser.version,
    networkType,
    timezone,
    wasmAvailable,
    webgpuAvailable,
  };
}
573
+
574
// ─── Main SDK Class ─────────────────────────────────────────────────

/** One inference measurement queued for batched upload to the API. */
interface TelemetryEntry {
  latency_ms: number;       // wall-clock duration of the inference call
  tokens_generated: number;
  success: boolean;
  model_id: string;
  timestamp: number;        // epoch milliseconds when recorded
}
583
+
584
class SlyOS {
  // Credentials and API endpoint.
  private apiKey: string;
  private apiUrl: string;
  // Stable per-install identifier; empty until initialize() resolves it.
  private deviceId: string;
  // Bearer token from /api/auth/sdk; null until authenticated.
  private token: string | null = null;
  // Loaded inference pipelines — presumably keyed by model id; confirm in loadModel().
  private models: Map<string, any> = new Map();
  // Hardware snapshot; null until initialize()/analyzeDevice() runs.
  private deviceProfile: DeviceProfile | null = null;
  private onProgress: ProgressCallback | null;
  private onEvent: EventCallback | null;
  private fallbackConfig: FallbackConfig | null;
  // Context window exposed via getModelContextWindow(); 0 until set.
  private modelContextWindow: number = 0;
  // Telemetry batching
  private telemetryBuffer: TelemetryEntry[] = [];
  private telemetryFlushTimer: any = null;
  private static readonly TELEMETRY_BATCH_SIZE = 10;
  private static readonly TELEMETRY_FLUSH_INTERVAL = 60000; // 60 seconds
600
+
601
+ constructor(config: SlyOSConfigWithFallback) {
602
+ this.apiKey = config.apiKey;
603
+ this.apiUrl = config.apiUrl || 'https://api.slyos.world';
604
+ this.deviceId = ''; // Set asynchronously in initialize()
605
+ this.onProgress = config.onProgress || null;
606
+ this.onEvent = config.onEvent || null;
607
+ this.fallbackConfig = config.fallback || null;
608
+ }
609
+
610
+ // ── Progress & Event Helpers ────────────────────────────────────
611
+
612
+ private emitProgress(stage: ProgressEvent['stage'], progress: number, message: string, detail?: any) {
613
+ if (this.onProgress) {
614
+ this.onProgress({ stage, progress, message, detail });
615
+ }
616
+ }
617
+
618
+ private emitEvent(type: SlyEvent['type'], data?: any) {
619
+ if (this.onEvent) {
620
+ this.onEvent({ type, data, timestamp: Date.now() });
621
+ }
622
+ }
623
+
624
+ // ── Telemetry Batching ─────────────────────────────────────────
625
+
626
+ private recordTelemetry(entry: TelemetryEntry) {
627
+ this.telemetryBuffer.push(entry);
628
+ if (this.telemetryBuffer.length >= SlyOS.TELEMETRY_BATCH_SIZE) {
629
+ this.flushTelemetry();
630
+ } else if (!this.telemetryFlushTimer) {
631
+ this.telemetryFlushTimer = setTimeout(() => this.flushTelemetry(), SlyOS.TELEMETRY_FLUSH_INTERVAL);
632
+ }
633
+ }
634
+
635
+ private async flushTelemetry() {
636
+ if (this.telemetryFlushTimer) {
637
+ clearTimeout(this.telemetryFlushTimer);
638
+ this.telemetryFlushTimer = null;
639
+ }
640
+ if (this.telemetryBuffer.length === 0 || !this.token) return;
641
+
642
+ const batch = [...this.telemetryBuffer];
643
+ this.telemetryBuffer = [];
644
+
645
+ try {
646
+ await axios.post(`${this.apiUrl}/api/devices/telemetry`, {
647
+ device_id: this.deviceId,
648
+ metrics: batch,
649
+ }, {
650
+ headers: { Authorization: `Bearer ${this.token}` },
651
+ timeout: 10000,
652
+ });
653
+ this.emitEvent('telemetry_flushed', { count: batch.length });
654
+ } catch {
655
+ // Put back on failure for next attempt
656
+ this.telemetryBuffer.unshift(...batch);
657
+ // Cap buffer to prevent memory leak
658
+ if (this.telemetryBuffer.length > 100) {
659
+ this.telemetryBuffer = this.telemetryBuffer.slice(-100);
660
+ }
661
+ }
662
+ }
663
+
664
+ // ── Device Analysis ─────────────────────────────────────────────
665
+
666
+ async analyzeDevice(): Promise<DeviceProfile> {
667
+ try {
668
+ this.emitProgress('profiling', 10, 'Analyzing device capabilities...');
669
+ this.deviceProfile = await profileDevice();
670
+ this.emitProgress('profiling', 100, `Device: ${this.deviceProfile.cpuCores} cores, ${Math.round(this.deviceProfile.memoryMB / 1024 * 10) / 10}GB RAM`);
671
+ this.emitEvent('device_profiled', this.deviceProfile);
672
+ return this.deviceProfile;
673
+ } catch (err: any) {
674
+ this.emitEvent('error', { method: 'analyzeDevice', error: err.message });
675
+ throw new Error(`Device analysis failed: ${err.message}`);
676
+ }
677
+ }
678
+
679
  /** Device profile captured by initialize()/analyzeDevice(); null until then. */
  getDeviceProfile(): DeviceProfile | null {
    return this.deviceProfile;
  }

  /** Context window (tokens) tracked for the current model; 0 until set. */
  getModelContextWindow(): number {
    return this.modelContextWindow;
  }

  /** Persistent device identifier; empty string until initialize() resolves it. */
  getDeviceId(): string {
    return this.deviceId;
  }

  /** SDK version string compiled into this build. */
  getSdkVersion(): string {
    return SDK_VERSION;
  }
694
+
695
+ // Flush remaining telemetry and clean up timers
696
+ async destroy(): Promise<void> {
697
+ await this.flushTelemetry();
698
+ if (this.telemetryFlushTimer) {
699
+ clearTimeout(this.telemetryFlushTimer);
700
+ this.telemetryFlushTimer = null;
701
+ }
702
+ }
703
+
704
+ // ── Smart Model Recommendation ──────────────────────────────────
705
+
706
+ recommendModel(category: ModelCategory = 'llm'): { modelId: string; quant: QuantizationLevel; contextWindow: number; reason: string } | null {
707
+ if (!this.deviceProfile) {
708
+ throw new Error('Call analyzeDevice() first to get a recommendation.');
709
+ }
710
+
711
+ const mem = this.deviceProfile.memoryMB;
712
+ const candidates = Object.entries(modelMap).filter(([_, info]) => info.category === category);
713
+
714
+ // Sort by size descending — pick the biggest model that fits
715
+ for (const [id, info] of candidates.sort((a, b) => b[1].sizesMB.q4 - a[1].sizesMB.q4)) {
716
+ const quant = selectQuantization(mem, id);
717
+ if (mem >= info.minRAM_MB[quant]) {
718
+ const ctx = recommendContextWindow(mem, quant);
719
+ return {
720
+ modelId: id,
721
+ quant,
722
+ contextWindow: ctx,
723
+ reason: `Best model for ${Math.round(mem / 1024)}GB RAM at ${quant.toUpperCase()} precision`,
724
+ };
725
+ }
726
+ }
727
+
728
+ // Fallback to smallest
729
+ const smallest = candidates.sort((a, b) => a[1].sizesMB.q4 - b[1].sizesMB.q4)[0];
730
+ if (smallest) {
731
+ return {
732
+ modelId: smallest[0],
733
+ quant: 'q4',
734
+ contextWindow: 512,
735
+ reason: 'Limited device memory — using smallest available model at Q4',
736
+ };
737
+ }
738
+
739
+ return null;
740
+ }
741
+
742
  // ── Initialize ──────────────────────────────────────────────────

  /**
   * Boots the SDK: resolves the persistent device id, profiles the device,
   * authenticates with the API key, registers the device (best-effort),
   * and starts the telemetry flush timer.
   *
   * Only authentication failure throws; device registration failure is
   * deliberately swallowed so offline/firewalled use still works.
   * @returns the populated DeviceProfile.
   */
  async initialize(): Promise<DeviceProfile> {
    this.emitProgress('initializing', 0, 'Starting SlyOS...');

    // Step 1: Persistent device ID
    this.deviceId = await getOrCreateDeviceId();

    // Step 2: Profile device (enhanced)
    this.emitProgress('profiling', 5, 'Detecting device capabilities...');
    this.deviceProfile = await profileDevice();

    // Step 2b: Generate device fingerprint
    this.deviceProfile.deviceFingerprint = await generateDeviceFingerprint();

    this.emitProgress('profiling', 20, `Detected: ${this.deviceProfile.cpuCores} CPU cores, ${Math.round(this.deviceProfile.memoryMB / 1024 * 10) / 10}GB RAM${this.deviceProfile.gpuRenderer ? ', GPU: ' + this.deviceProfile.gpuRenderer.substring(0, 30) : ''}`);
    this.emitEvent('device_profiled', this.deviceProfile);

    // Step 3: Authenticate — the only fatal step in this method.
    this.emitProgress('initializing', 40, 'Authenticating with API key...');
    try {
      const authRes = await axios.post(`${this.apiUrl}/api/auth/sdk`, {
        apiKey: this.apiKey,
      });
      this.token = authRes.data.token;
      this.emitProgress('initializing', 60, 'Authenticated successfully');
      this.emitEvent('auth', { success: true });
    } catch (err: any) {
      this.emitProgress('error', 0, `Authentication failed: ${err.message}`);
      this.emitEvent('error', { stage: 'auth', error: err.message });
      throw new Error(`SlyOS auth failed: ${err.response?.data?.error || err.message}`);
    }

    // Step 4: Measure API latency (negative result means unreachable — omit)
    const latency = await measureApiLatency(this.apiUrl);
    if (latency > 0) this.deviceProfile.latencyToApiMs = latency;

    // Step 5: Register device with full intelligence profile
    this.emitProgress('initializing', 70, 'Registering device...');
    try {
      // Determine supported quantizations based on memory
      const mem = this.deviceProfile.memoryMB;
      const supportedQuants: string[] = ['q4'];
      if (mem >= 4096) supportedQuants.push('q8');
      if (mem >= 8192) supportedQuants.push('fp16');
      if (mem >= 16384) supportedQuants.push('fp32');

      // Determine recommended tier (1 = weakest hardware, 3 = strongest)
      let recommendedTier = 1;
      if (mem >= 8192 && this.deviceProfile.cpuCores >= 4) recommendedTier = 2;
      if (mem >= 16384 && this.deviceProfile.cpuCores >= 8) recommendedTier = 3;

      await axios.post(`${this.apiUrl}/api/devices/register`, {
        device_id: this.deviceId,
        device_fingerprint: this.deviceProfile.deviceFingerprint,
        platform: this.deviceProfile.platform,
        os_version: this.deviceProfile.os,
        total_memory_mb: this.deviceProfile.memoryMB,
        cpu_cores: this.deviceProfile.cpuCores,
        // Enhanced fields (null rather than 0/'' when not detected)
        gpu_renderer: this.deviceProfile.gpuRenderer || null,
        gpu_vram_mb: this.deviceProfile.gpuVramMb || null,
        screen_width: this.deviceProfile.screenWidth || null,
        screen_height: this.deviceProfile.screenHeight || null,
        pixel_ratio: this.deviceProfile.pixelRatio || null,
        browser_name: this.deviceProfile.browserName || null,
        browser_version: this.deviceProfile.browserVersion || null,
        sdk_version: SDK_VERSION,
        network_type: this.deviceProfile.networkType || null,
        latency_to_api_ms: this.deviceProfile.latencyToApiMs || null,
        timezone: this.deviceProfile.timezone || null,
        // Capabilities
        wasm_available: this.deviceProfile.wasmAvailable || false,
        webgpu_available: this.deviceProfile.webgpuAvailable || false,
        supported_quants: supportedQuants,
        recommended_tier: recommendedTier,
      }, {
        headers: { Authorization: `Bearer ${this.token}` },
      });
      this.emitProgress('initializing', 90, 'Device registered');
      this.emitEvent('device_registered', { deviceId: this.deviceId, fingerprint: this.deviceProfile.deviceFingerprint });
    } catch (err: any) {
      // Non-fatal — device registration shouldn't block usage
      this.emitProgress('initializing', 90, 'Device registration skipped (non-fatal)');
    }

    // Step 6: Start telemetry flush timer
    this.telemetryFlushTimer = setTimeout(() => this.flushTelemetry(), SlyOS.TELEMETRY_FLUSH_INTERVAL);

    this.emitProgress('ready', 100, `SlyOS v${SDK_VERSION} ready — ${this.deviceProfile.recommendedQuant.toUpperCase()}, ${this.deviceProfile.gpuRenderer ? 'GPU detected' : 'CPU only'}`);

    return this.deviceProfile;
  }
835
+
836
+ // ── Model Loading ───────────────────────────────────────────────
837
+
838
+ getAvailableModels(): Record<string, { models: { id: string; sizesMB: Record<string, number>; minRAM_MB: Record<string, number> }[] }> {
839
+ const grouped: Record<string, any[]> = { llm: [], stt: [] };
840
+ for (const [id, info] of Object.entries(modelMap)) {
841
+ if (!grouped[info.category]) grouped[info.category] = [];
842
+ grouped[info.category].push({
843
+ id,
844
+ sizesMB: info.sizesMB,
845
+ minRAM_MB: info.minRAM_MB,
846
+ });
847
+ }
848
+ return Object.fromEntries(
849
+ Object.entries(grouped).map(([cat, models]) => [cat, { models }])
850
+ );
851
+ }
852
+
853
+ async searchModels(query: string, options?: { limit?: number; task?: string }): Promise<Array<{
854
+ id: string;
855
+ name: string;
856
+ downloads: number;
857
+ likes: number;
858
+ task: string;
859
+ size_category: string;
860
+ }>> {
861
+ try {
862
+ const limit = options?.limit || 20;
863
+ const filters = ['onnx']; // Filter for ONNX models only
864
+ if (options?.task) {
865
+ filters.push(options.task);
866
+ }
867
+
868
+ const filterString = filters.map(f => `"${f}"`).join(',');
869
+ const url = `https://huggingface.co/api/models?search=${encodeURIComponent(query)}&filter=${encodeURIComponent(`[${filterString}]`)}&sort=downloads&direction=-1&limit=${limit}`;
870
+
871
+ const response = await axios.get(url, { timeout: 10000 });
872
+ const models = Array.isArray(response.data) ? response.data : [];
873
+
874
+ return models.map((model: any) => ({
875
+ id: model.id,
876
+ name: model.id.split('/')[1] || model.id,
877
+ downloads: model.downloads || 0,
878
+ likes: model.likes || 0,
879
+ task: model.task || 'unknown',
880
+ size_category: model.size_category || 'unknown',
881
+ }));
882
+ } catch (error: any) {
883
+ this.emitEvent('error', { stage: 'model_search', error: error.message });
884
+ throw new Error(`Model search failed: ${error.message}`);
885
+ }
886
+ }
887
+
888
+ canRunModel(modelId: string, quant?: QuantizationLevel): { canRun: boolean; reason: string; recommendedQuant: QuantizationLevel } {
889
+ const info = modelMap[modelId];
890
+ if (!info) return { canRun: false, reason: `Unknown model "${modelId}"`, recommendedQuant: 'q4' };
891
+ if (!this.deviceProfile) return { canRun: true, reason: 'Device not profiled yet — call initialize() first', recommendedQuant: 'q4' };
892
+
893
+ const mem = this.deviceProfile.memoryMB;
894
+ const bestQuant = selectQuantization(mem, modelId);
895
+
896
+ if (quant && mem < info.minRAM_MB[quant]) {
897
+ return {
898
+ canRun: false,
899
+ reason: `Not enough RAM for ${quant.toUpperCase()} (need ${info.minRAM_MB[quant]}MB, have ${mem}MB). Try ${bestQuant.toUpperCase()} instead.`,
900
+ recommendedQuant: bestQuant,
901
+ };
902
+ }
903
+
904
+ if (mem < info.minRAM_MB.q4) {
905
+ return {
906
+ canRun: false,
907
+ reason: `Model requires at least ${info.minRAM_MB.q4}MB RAM even at Q4. Device has ${mem}MB.`,
908
+ recommendedQuant: 'q4',
909
+ };
910
+ }
911
+
912
+ return { canRun: true, reason: `OK at ${bestQuant.toUpperCase()} precision`, recommendedQuant: bestQuant };
913
+ }
914
+
915
+ async loadModel(modelId: string, options?: { quant?: QuantizationLevel }): Promise<void> {
916
+ const info = modelMap[modelId];
917
+ let hfModelId: string;
918
+ let task: string;
919
+ let estimatedSize: number;
920
+
921
+ // Handle curated models
922
+ if (info) {
923
+ hfModelId = info.hfModel;
924
+ task = info.task;
925
+
926
+ // Determine quantization
927
+ let quant: QuantizationLevel = options?.quant || 'fp32';
928
+ if (!options?.quant && this.deviceProfile) {
929
+ quant = selectQuantization(this.deviceProfile.memoryMB, modelId);
930
+ this.emitProgress('downloading', 0, `Auto-selected ${quant.toUpperCase()} quantization for your device`);
931
+ }
932
+
933
+ // Check feasibility
934
+ const check = this.canRunModel(modelId, quant);
935
+ if (!check.canRun) {
936
+ this.emitProgress('error', 0, check.reason);
937
+ throw new Error(check.reason);
938
+ }
939
+
940
+ estimatedSize = info.sizesMB[quant];
941
+ this.emitProgress('downloading', 0, `Downloading ${modelId} (${quant.toUpperCase()}, ~${estimatedSize}MB)...`);
942
+ this.emitEvent('model_download_start', { modelId, quant, estimatedSizeMB: estimatedSize });
943
+ } else {
944
+ // Handle custom HuggingFace models
945
+ hfModelId = modelId;
946
+ task = 'text-generation'; // Default task
947
+ estimatedSize = 2048; // Default estimate
948
+
949
+ this.emitProgress('downloading', 0, `Loading custom HuggingFace model: ${modelId}...`);
950
+ this.emitEvent('model_download_start', { modelId, custom: true, estimatedSizeMB: estimatedSize });
951
+ }
952
+
953
+ // Map quant to dtype for HuggingFace
954
+ const dtypeMap: Record<QuantizationLevel, string> = {
955
+ q4: 'q4',
956
+ q8: 'q8',
957
+ fp16: 'fp16',
958
+ fp32: 'fp32',
959
+ };
960
+
961
+ let lastReportedPercent = 0;
962
+ const startTime = Date.now();
963
+
964
+ try {
965
+ // For custom HF models, detect context window
966
+ let detectedContextWindow = 2048;
967
+ if (!info) {
968
+ detectedContextWindow = await detectContextWindowFromHF(hfModelId);
969
+ }
970
+
971
+ const pipe = await pipeline(task as any, hfModelId, {
972
+ device: 'cpu',
973
+ dtype: 'q4' as any, // Default to q4 for stability
974
+ progress_callback: (progressData: any) => {
975
+ // HuggingFace transformers sends progress events during download
976
+ if (progressData && typeof progressData === 'object') {
977
+ let percent = 0;
978
+ let msg = 'Downloading...';
979
+
980
+ if (progressData.status === 'progress' && progressData.progress !== undefined) {
981
+ percent = Math.round(progressData.progress);
982
+ const loaded = progressData.loaded ? `${Math.round(progressData.loaded / 1024 / 1024)}MB` : '';
983
+ const total = progressData.total ? `${Math.round(progressData.total / 1024 / 1024)}MB` : '';
984
+ msg = loaded && total ? `Downloading: ${loaded} / ${total}` : `Downloading: ${percent}%`;
985
+ } else if (progressData.status === 'done') {
986
+ percent = 100;
987
+ msg = progressData.file ? `Downloaded ${progressData.file}` : 'Download complete';
988
+ } else if (progressData.status === 'initiate') {
989
+ msg = progressData.file ? `Starting download: ${progressData.file}` : 'Initiating download...';
990
+ }
991
+
992
+ // Only emit if progress meaningfully changed (avoid flooding)
993
+ if (percent !== lastReportedPercent || progressData.status === 'done' || progressData.status === 'initiate') {
994
+ lastReportedPercent = percent;
995
+ this.emitProgress('downloading', percent, msg, progressData);
996
+ this.emitEvent('model_download_progress', { modelId, percent, ...progressData });
997
+ }
998
+ }
999
+ },
1000
+ });
1001
+
1002
+ const loadTime = Date.now() - startTime;
1003
+ let contextWindow: number;
1004
+
1005
+ if (info) {
1006
+ // For curated models, use recommendContextWindow
1007
+ const quant = options?.quant || (this.deviceProfile ? selectQuantization(this.deviceProfile.memoryMB, modelId) : 'q4');
1008
+ contextWindow = this.deviceProfile
1009
+ ? recommendContextWindow(this.deviceProfile.memoryMB, quant)
1010
+ : 2048;
1011
+ } else {
1012
+ // For custom HF models, use detected context window
1013
+ contextWindow = detectedContextWindow;
1014
+ }
1015
+
1016
+ this.modelContextWindow = contextWindow;
1017
+ this.models.set(modelId, { pipe, info, quant: 'q4', contextWindow });
1018
+
1019
+ this.emitProgress('ready', 100, `${modelId} loaded (q4, ${(loadTime / 1000).toFixed(1)}s, ctx: ${contextWindow})`);
1020
+ this.emitEvent('model_loaded', { modelId, quant: 'q4', loadTimeMs: loadTime, contextWindow });
1021
+
1022
+ // Telemetry
1023
+ if (this.token) {
1024
+ await axios.post(`${this.apiUrl}/api/telemetry`, {
1025
+ device_id: this.deviceId,
1026
+ event_type: 'model_load',
1027
+ model_id: modelId,
1028
+ success: true,
1029
+ metadata: { quant: 'q4', loadTimeMs: loadTime, contextWindow, custom: !info },
1030
+ }, {
1031
+ headers: { Authorization: `Bearer ${this.token}` },
1032
+ }).catch(() => {});
1033
+ }
1034
+ } catch (error: any) {
1035
+ this.emitProgress('error', 0, `Failed to load ${modelId}: ${error.message}`);
1036
+ this.emitEvent('error', { stage: 'model_load', modelId, error: error.message });
1037
+
1038
+ if (this.token) {
1039
+ await axios.post(`${this.apiUrl}/api/telemetry`, {
1040
+ device_id: this.deviceId,
1041
+ event_type: 'model_load',
1042
+ model_id: modelId,
1043
+ success: false,
1044
+ error_message: error.message,
1045
+ }, {
1046
+ headers: { Authorization: `Bearer ${this.token}` },
1047
+ }).catch(() => {});
1048
+ }
1049
+ throw error;
1050
+ }
1051
+ }
1052
+
1053
+ // ── Inference: Generate ─────────────────────────────────────────
1054
+
1055
+ async generate(modelId: string, prompt: string, options: GenerateOptions = {}): Promise<string> {
1056
+ if (!this.models.has(modelId)) {
1057
+ await this.loadModel(modelId);
1058
+ }
1059
+
1060
+ const loaded = this.models.get(modelId);
1061
+ if (!loaded) {
1062
+ throw new Error(`Model "${modelId}" failed to load. Check your connection and model ID.`);
1063
+ }
1064
+ const { pipe, info, contextWindow } = loaded;
1065
+ if (info.category !== 'llm') {
1066
+ throw new Error(`Model "${modelId}" is not an LLM. Use transcribe() for STT models.`);
1067
+ }
1068
+
1069
+ const maxTokens = Math.min(options.maxTokens || 100, contextWindow || 2048);
1070
+
1071
+ this.emitProgress('generating', 0, `Generating response (max ${maxTokens} tokens)...`);
1072
+ this.emitEvent('inference_start', { modelId, maxTokens });
1073
+ const startTime = Date.now();
1074
+
1075
+ try {
1076
+ const result = await pipe(prompt, {
1077
+ max_new_tokens: maxTokens,
1078
+ temperature: options.temperature || 0.7,
1079
+ top_p: options.topP || 0.9,
1080
+ do_sample: true,
1081
+ });
1082
+
1083
+ const rawOutput = result[0].generated_text;
1084
+ // HuggingFace transformers returns the prompt + generated text concatenated.
1085
+ // Strip the original prompt so we only return the NEW tokens.
1086
+ const response = rawOutput.startsWith(prompt)
1087
+ ? rawOutput.slice(prompt.length).trim()
1088
+ : rawOutput.trim();
1089
+ const latency = Date.now() - startTime;
1090
+ const tokensGenerated = response.split(/\s+/).length;
1091
+ const tokensPerSec = (tokensGenerated / (latency / 1000)).toFixed(1);
1092
+
1093
+ this.emitProgress('ready', 100, `Generated ${tokensGenerated} tokens in ${(latency / 1000).toFixed(1)}s (${tokensPerSec} tok/s)`);
1094
+ this.emitEvent('inference_complete', { modelId, latencyMs: latency, tokensGenerated, tokensPerSec: parseFloat(tokensPerSec) });
1095
+
1096
+ // Batch telemetry (new device intelligence)
1097
+ this.recordTelemetry({
1098
+ latency_ms: latency,
1099
+ tokens_generated: tokensGenerated,
1100
+ success: true,
1101
+ model_id: modelId,
1102
+ timestamp: Date.now(),
1103
+ });
1104
+
1105
+ // Legacy telemetry (backwards compatible)
1106
+ if (this.token) {
1107
+ await axios.post(`${this.apiUrl}/api/telemetry`, {
1108
+ device_id: this.deviceId,
1109
+ event_type: 'inference',
1110
+ model_id: modelId,
1111
+ latency_ms: latency,
1112
+ tokens_generated: tokensGenerated,
1113
+ success: true,
1114
+ }, {
1115
+ headers: { Authorization: `Bearer ${this.token}` },
1116
+ }).catch(() => {});
1117
+ }
1118
+
1119
+ return response;
1120
+ } catch (error: any) {
1121
+ this.emitProgress('error', 0, `Generation failed: ${error.message}`);
1122
+ this.emitEvent('error', { stage: 'inference', modelId, error: error.message });
1123
+
1124
+ // Batch telemetry (failure)
1125
+ this.recordTelemetry({
1126
+ latency_ms: 0,
1127
+ tokens_generated: 0,
1128
+ success: false,
1129
+ model_id: modelId,
1130
+ timestamp: Date.now(),
1131
+ });
1132
+
1133
+ if (this.token) {
1134
+ await axios.post(`${this.apiUrl}/api/telemetry`, {
1135
+ device_id: this.deviceId,
1136
+ event_type: 'inference',
1137
+ model_id: modelId,
1138
+ success: false,
1139
+ error_message: error.message,
1140
+ }, {
1141
+ headers: { Authorization: `Bearer ${this.token}` },
1142
+ }).catch(() => {});
1143
+ }
1144
+ throw error;
1145
+ }
1146
+ }
1147
+
1148
+ // ── Inference: Transcribe ───────────────────────────────────────
1149
+
1150
+ async transcribe(modelId: string, audioInput: any, options: TranscribeOptions = {}): Promise<string> {
1151
+ if (!this.models.has(modelId)) {
1152
+ await this.loadModel(modelId);
1153
+ }
1154
+
1155
+ const loaded = this.models.get(modelId);
1156
+ if (!loaded) {
1157
+ throw new Error(`Model "${modelId}" failed to load. Check your connection and model ID.`);
1158
+ }
1159
+ const { pipe, info } = loaded;
1160
+ if (info.category !== 'stt') {
1161
+ throw new Error(`Model "${modelId}" is not an STT model. Use generate() for LLMs.`);
1162
+ }
1163
+
1164
+ this.emitProgress('transcribing', 0, 'Transcribing audio...');
1165
+ this.emitEvent('inference_start', { modelId, type: 'transcription' });
1166
+ const startTime = Date.now();
1167
+
1168
+ try {
1169
+ const result = await pipe(audioInput, {
1170
+ language: options.language || 'en',
1171
+ return_timestamps: options.returnTimestamps || false,
1172
+ });
1173
+
1174
+ const text = result.text;
1175
+ const latency = Date.now() - startTime;
1176
+
1177
+ this.emitProgress('ready', 100, `Transcribed in ${(latency / 1000).toFixed(1)}s`);
1178
+ this.emitEvent('inference_complete', { modelId, latencyMs: latency, type: 'transcription' });
1179
+
1180
+ if (this.token) {
1181
+ await axios.post(`${this.apiUrl}/api/telemetry`, {
1182
+ device_id: this.deviceId,
1183
+ event_type: 'inference',
1184
+ model_id: modelId,
1185
+ latency_ms: latency,
1186
+ success: true,
1187
+ }, {
1188
+ headers: { Authorization: `Bearer ${this.token}` },
1189
+ }).catch(() => {});
1190
+ }
1191
+
1192
+ return text;
1193
+ } catch (error: any) {
1194
+ this.emitProgress('error', 0, `Transcription failed: ${error.message}`);
1195
+ this.emitEvent('error', { stage: 'transcription', modelId, error: error.message });
1196
+
1197
+ if (this.token) {
1198
+ await axios.post(`${this.apiUrl}/api/telemetry`, {
1199
+ device_id: this.deviceId,
1200
+ event_type: 'inference',
1201
+ model_id: modelId,
1202
+ success: false,
1203
+ error_message: error.message,
1204
+ }, {
1205
+ headers: { Authorization: `Bearer ${this.token}` },
1206
+ }).catch(() => {});
1207
+ }
1208
+ throw error;
1209
+ }
1210
+ }
1211
+
1212
+ // ── OpenAI Compatibility ────────────────────────────────────────────
1213
+
1214
+ async chatCompletion(modelId: string, request: OpenAIChatCompletionRequest): Promise<OpenAIChatCompletionResponse> {
1215
+ try {
1216
+ // Convert OpenAI message format to a prompt string
1217
+ const prompt = request.messages
1218
+ .map(msg => {
1219
+ if (msg.role === 'system') {
1220
+ return `System: ${msg.content}`;
1221
+ } else if (msg.role === 'user') {
1222
+ return `User: ${msg.content}`;
1223
+ } else {
1224
+ return `Assistant: ${msg.content}`;
1225
+ }
1226
+ })
1227
+ .join('\n\n');
1228
+
1229
+ const response = await this.generate(modelId, prompt, {
1230
+ temperature: request.temperature,
1231
+ maxTokens: request.max_tokens,
1232
+ topP: request.top_p,
1233
+ });
1234
+
1235
+ // Estimate token counts (rough approximation: ~4 chars per token)
1236
+ const promptTokens = Math.ceil(prompt.length / 4);
1237
+ const completionTokens = Math.ceil(response.length / 4);
1238
+
1239
+ return {
1240
+ id: `chat-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
1241
+ object: 'chat.completion',
1242
+ created: Math.floor(Date.now() / 1000),
1243
+ model: modelId,
1244
+ choices: [
1245
+ {
1246
+ index: 0,
1247
+ message: {
1248
+ role: 'assistant',
1249
+ content: response,
1250
+ },
1251
+ finish_reason: 'stop',
1252
+ },
1253
+ ],
1254
+ usage: {
1255
+ prompt_tokens: promptTokens,
1256
+ completion_tokens: completionTokens,
1257
+ total_tokens: promptTokens + completionTokens,
1258
+ },
1259
+ };
1260
+ } catch (error: any) {
1261
+ // Fallback to cloud provider if configured
1262
+ if (this.fallbackConfig?.provider === 'openai') {
1263
+ return this.fallbackToOpenAI(modelId, request);
1264
+ } else if (this.fallbackConfig?.provider === 'bedrock') {
1265
+ return this.fallbackToBedrock(modelId, request);
1266
+ }
1267
+ throw error;
1268
+ }
1269
+ }
1270
+
1271
+ // ── AWS Bedrock Compatibility ──────────────────────────────────────
1272
+
1273
+ async bedrockInvoke(modelId: string, request: BedrockInvokeRequest): Promise<BedrockInvokeResponse> {
1274
+ try {
1275
+ const response = await this.generate(modelId, request.inputText, {
1276
+ temperature: request.textGenerationConfig?.temperature,
1277
+ maxTokens: request.textGenerationConfig?.maxTokenCount,
1278
+ topP: request.textGenerationConfig?.topP,
1279
+ });
1280
+
1281
+ // Estimate token counts
1282
+ const inputTokens = Math.ceil(request.inputText.length / 4);
1283
+ const outputTokens = Math.ceil(response.length / 4);
1284
+
1285
+ return {
1286
+ results: [
1287
+ {
1288
+ outputText: response,
1289
+ tokenCount: outputTokens,
1290
+ },
1291
+ ],
1292
+ input_text_token_count: inputTokens,
1293
+ };
1294
+ } catch (error: any) {
1295
+ // Fallback to cloud provider if configured
1296
+ if (this.fallbackConfig?.provider === 'bedrock') {
1297
+ return this.fallbackToBedrockCloud(modelId, request);
1298
+ } else if (this.fallbackConfig?.provider === 'openai') {
1299
+ return this.fallbackToOpenAICloud(modelId, request);
1300
+ }
1301
+ throw error;
1302
+ }
1303
+ }
1304
+
1305
+ // ── Fallback: OpenAI Cloud ────────────────────────────────────────
1306
+
1307
+ private async fallbackToOpenAI(modelId: string, request: OpenAIChatCompletionRequest): Promise<OpenAIChatCompletionResponse> {
1308
+ if (!this.fallbackConfig) {
1309
+ throw new Error('OpenAI fallback not configured');
1310
+ }
1311
+
1312
+ const mappedModel = this.mapModelToOpenAI(modelId);
1313
+ const payload = {
1314
+ model: this.fallbackConfig.model || mappedModel,
1315
+ messages: request.messages,
1316
+ temperature: request.temperature,
1317
+ max_tokens: request.max_tokens,
1318
+ top_p: request.top_p,
1319
+ frequency_penalty: request.frequency_penalty,
1320
+ presence_penalty: request.presence_penalty,
1321
+ stop: request.stop,
1322
+ };
1323
+
1324
+ try {
1325
+ const response = await axios.post('https://api.openai.com/v1/chat/completions', payload, {
1326
+ headers: {
1327
+ Authorization: `Bearer ${this.fallbackConfig.apiKey}`,
1328
+ 'Content-Type': 'application/json',
1329
+ },
1330
+ });
1331
+
1332
+ this.emitEvent('fallback_success', { provider: 'openai', originalModel: modelId, mappedModel: this.fallbackConfig.model });
1333
+ return response.data;
1334
+ } catch (error: any) {
1335
+ this.emitProgress('error', 0, `OpenAI fallback failed: ${error.message}`);
1336
+ this.emitEvent('fallback_error', { provider: 'openai', error: error.message });
1337
+ throw error;
1338
+ }
1339
+ }
1340
+
1341
+ private async fallbackToBedrock(modelId: string, request: OpenAIChatCompletionRequest): Promise<OpenAIChatCompletionResponse> {
1342
+ if (!this.fallbackConfig) {
1343
+ throw new Error('Bedrock fallback not configured');
1344
+ }
1345
+
1346
+ // Convert OpenAI format to Bedrock's expected format (simplified)
1347
+ const lastMessage = request.messages[request.messages.length - 1];
1348
+ const inputText = lastMessage.content;
1349
+
1350
+ const bedrockResponse = await this.invokeBedrockCloud(inputText, {
1351
+ temperature: request.temperature,
1352
+ maxTokenCount: request.max_tokens,
1353
+ topP: request.top_p,
1354
+ });
1355
+
1356
+ // Convert Bedrock response back to OpenAI format
1357
+ const promptTokens = Math.ceil(inputText.length / 4);
1358
+ const completionTokens = bedrockResponse.results[0].tokenCount;
1359
+
1360
+ this.emitEvent('fallback_success', { provider: 'bedrock', originalModel: modelId, mappedModel: this.fallbackConfig.model });
1361
+
1362
+ return {
1363
+ id: `chat-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
1364
+ object: 'chat.completion',
1365
+ created: Math.floor(Date.now() / 1000),
1366
+ model: modelId,
1367
+ choices: [
1368
+ {
1369
+ index: 0,
1370
+ message: {
1371
+ role: 'assistant',
1372
+ content: bedrockResponse.results[0].outputText,
1373
+ },
1374
+ finish_reason: 'stop',
1375
+ },
1376
+ ],
1377
+ usage: {
1378
+ prompt_tokens: promptTokens,
1379
+ completion_tokens: completionTokens,
1380
+ total_tokens: promptTokens + completionTokens,
1381
+ },
1382
+ };
1383
+ }
1384
+
1385
+ private async fallbackToOpenAICloud(modelId: string, request: BedrockInvokeRequest): Promise<BedrockInvokeResponse> {
1386
+ if (!this.fallbackConfig) {
1387
+ throw new Error('OpenAI fallback not configured');
1388
+ }
1389
+
1390
+ const mappedModel = this.mapModelToOpenAI(modelId);
1391
+ const payload = {
1392
+ model: this.fallbackConfig.model || mappedModel,
1393
+ messages: [{ role: 'user', content: request.inputText }],
1394
+ temperature: request.textGenerationConfig?.temperature,
1395
+ max_tokens: request.textGenerationConfig?.maxTokenCount,
1396
+ top_p: request.textGenerationConfig?.topP,
1397
+ };
1398
+
1399
+ try {
1400
+ const response = await axios.post('https://api.openai.com/v1/chat/completions', payload, {
1401
+ headers: {
1402
+ Authorization: `Bearer ${this.fallbackConfig.apiKey}`,
1403
+ 'Content-Type': 'application/json',
1404
+ },
1405
+ });
1406
+
1407
+ const outputText = response.data.choices[0].message.content;
1408
+ const inputTokens = Math.ceil(request.inputText.length / 4);
1409
+ const outputTokens = response.data.usage.completion_tokens;
1410
+
1411
+ this.emitEvent('fallback_success', { provider: 'openai', originalModel: modelId, mappedModel: this.fallbackConfig.model });
1412
+
1413
+ return {
1414
+ results: [
1415
+ {
1416
+ outputText,
1417
+ tokenCount: outputTokens,
1418
+ },
1419
+ ],
1420
+ input_text_token_count: inputTokens,
1421
+ };
1422
+ } catch (error: any) {
1423
+ this.emitProgress('error', 0, `OpenAI fallback failed: ${error.message}`);
1424
+ this.emitEvent('fallback_error', { provider: 'openai', error: error.message });
1425
+ throw error;
1426
+ }
1427
+ }
1428
+
1429
  /**
   * Thin wrapper that routes a Bedrock-format request straight to the
   * Bedrock cloud endpoint, emitting fallback error events on failure.
   *
   * @throws Error when no fallback config is present or the cloud call fails
   */
  private async fallbackToBedrockCloud(modelId: string, request: BedrockInvokeRequest): Promise<BedrockInvokeResponse> {
    if (!this.fallbackConfig) {
      throw new Error('Bedrock fallback not configured');
    }

    try {
      // NOTE: modelId is not forwarded here; invokeBedrockCloud uses the
      // configured fallback model (or its own default) instead.
      return await this.invokeBedrockCloud(request.inputText, request.textGenerationConfig);
    } catch (error: any) {
      this.emitProgress('error', 0, `Bedrock fallback failed: ${error.message}`);
      this.emitEvent('fallback_error', { provider: 'bedrock', error: error.message });
      throw error;
    }
  }
1442
+
1443
+ private async invokeBedrockCloud(inputText: string, config?: BedrockTextGenerationConfig): Promise<BedrockInvokeResponse> {
1444
+ if (!this.fallbackConfig) {
1445
+ throw new Error('Bedrock fallback not configured');
1446
+ }
1447
+
1448
+ const region = this.fallbackConfig.region || 'us-east-1';
1449
+ const model = this.fallbackConfig.model || 'anthropic.claude-3-sonnet-20240229-v1:0';
1450
+
1451
+ // Bedrock endpoint format: https://bedrock-runtime.{region}.amazonaws.com/model/{modelId}/invoke
1452
+ const endpoint = `https://bedrock-runtime.${region}.amazonaws.com/model/${model}/invoke`;
1453
+
1454
+ const payload = {
1455
+ inputText,
1456
+ textGenerationConfig: {
1457
+ maxTokenCount: config?.maxTokenCount || 256,
1458
+ temperature: config?.temperature || 0.7,
1459
+ topP: config?.topP || 0.9,
1460
+ topK: config?.topK,
1461
+ stopSequences: config?.stopSequences,
1462
+ },
1463
+ };
1464
+
1465
+ try {
1466
+ const response = await axios.post(endpoint, payload, {
1467
+ headers: {
1468
+ Authorization: `Bearer ${this.fallbackConfig.apiKey}`,
1469
+ 'Content-Type': 'application/json',
1470
+ 'X-Amz-Target': 'AmazonBedrockRuntime.InvokeModel',
1471
+ },
1472
+ });
1473
+
1474
+ this.emitEvent('fallback_success', { provider: 'bedrock', model });
1475
+ return response.data;
1476
+ } catch (error: any) {
1477
+ throw new Error(`Bedrock invocation failed: ${error.message}`);
1478
+ }
1479
+ }
1480
+
1481
+ private mapModelToOpenAI(slyModelId: string): string {
1482
+ const modelMapping: Record<string, string> = {
1483
+ 'quantum-1.7b': 'gpt-4o-mini',
1484
+ 'quantum-3b': 'gpt-4o',
1485
+ 'quantum-code-3b': 'gpt-4o',
1486
+ 'quantum-8b': 'gpt-4-turbo',
1487
+ };
1488
+ return modelMapping[slyModelId] || 'gpt-4o-mini';
1489
+ }
1490
+
1491
+ // ═══════════════════════════════════════════════════════════
1492
+ // RAG — Retrieval Augmented Generation
1493
+ // ═══════════════════════════════════════════════════════════
1494
+
1495
  // Lazily-initialized feature-extraction pipeline used for local RAG
  // embeddings (see loadEmbeddingModel / embedTextLocal).
  private localEmbeddingModel: any = null;
  // In-memory offline RAG indexes keyed by knowledge-base id, populated by
  // syncKnowledgeBase() and consumed by ragQueryOffline().
  private offlineIndexes: Map<string, OfflineIndex> = new Map();
1497
+
1498
  /**
   * Tier 2: Cloud-indexed RAG with local inference.
   * Retrieval (embedding search) happens on the SlyOS backend; the answer
   * is then generated on-device with the local model.
   *
   * @param options.query           user question
   * @param options.knowledgeBaseId server-side knowledge base to search
   * @param options.modelId         local model used for generation
   * @param options.topK            chunks to retrieve (default 5)
   * @returns generated answer plus the retrieved chunks and latency
   * @throws Error when unauthenticated or retrieval/generation fails
   */
  async ragQuery(options: RAGOptions): Promise<RAGResponse> {
    const startTime = Date.now();

    try {
      if (!this.token) throw new Error('Not authenticated. Call init() first.');

      // Step 1: Retrieve relevant chunks from backend
      const searchResponse = await axios.post(
        `${this.apiUrl}/api/rag/knowledge-bases/${options.knowledgeBaseId}/query`,
        {
          query: options.query,
          top_k: options.topK || 5,
          model_id: options.modelId
        },
        { headers: { Authorization: `Bearer ${this.token}` } }
      );

      let { retrieved_chunks, prompt_template, context } = searchResponse.data;

      // Apply context window limits
      const contextWindow = this.modelContextWindow || 2048;
      const maxContextChars = (contextWindow - 200) * 3; // Rough token-to-char ratio, reserving 200 tokens

      // NOTE(review): this truncation only affects the `context` echoed back
      // to the caller — generation below uses `prompt_template` unchanged,
      // so an oversized server-built prompt is NOT shrunk here. Presumably
      // the server already bounds prompt_template; confirm.
      if (context && context.length > maxContextChars) {
        context = context.substring(0, maxContextChars) + '...';
      }

      // Step 2: Generate response locally using the augmented prompt
      const response = await this.generate(options.modelId, prompt_template, {
        temperature: options.temperature,
        maxTokens: options.maxTokens,
      });

      // Map server snake_case chunk fields to the SDK's camelCase shape.
      return {
        query: options.query,
        retrievedChunks: retrieved_chunks.map((c: any) => ({
          id: c.id,
          documentId: c.document_id,
          documentName: c.document_name,
          content: c.content,
          similarityScore: c.similarity_score,
          metadata: c.metadata
        })),
        generatedResponse: response,
        context,
        latencyMs: Date.now() - startTime,
        tierUsed: 2,
      };
    } catch (error: any) {
      this.emitEvent('error', { stage: 'rag_query', error: error.message });
      throw new Error(`RAG query failed: ${error.message}`);
    }
  }
1555
+
1556
+ /**
1557
+ * Tier 1: Fully local RAG. Zero network calls.
1558
+ * Documents are chunked/embedded on-device, retrieval and generation all local.
1559
+ */
1560
+ async ragQueryLocal(options: RAGOptions & { documents: Array<{ content: string; name?: string }> }): Promise<RAGResponse> {
1561
+ const startTime = Date.now();
1562
+
1563
+ try {
1564
+ // Step 1: Load embedding model if needed
1565
+ if (!this.localEmbeddingModel) {
1566
+ await this.loadEmbeddingModel();
1567
+ }
1568
+
1569
+ // Adapt chunk size based on context window for efficiency
1570
+ const contextWindow = this.modelContextWindow || 2048;
1571
+ const chunkSize = contextWindow <= 1024 ? 256 : contextWindow <= 2048 ? 512 : 1024;
1572
+ const overlap = Math.floor(chunkSize / 4);
1573
+
1574
+ // Step 2: Chunk documents if not already chunked
1575
+ const allChunks: Array<{ content: string; documentName: string; embedding?: number[] }> = [];
1576
+ for (const doc of options.documents) {
1577
+ const chunks = this.chunkTextLocal(doc.content, chunkSize, overlap);
1578
+ for (const chunk of chunks) {
1579
+ const embedding = await this.embedTextLocal(chunk);
1580
+ allChunks.push({ content: chunk, documentName: doc.name || 'Document', embedding });
1581
+ }
1582
+ }
1583
+
1584
+ // Step 3: Embed query
1585
+ const queryEmbedding = await this.embedTextLocal(options.query);
1586
+
1587
+ // Step 4: Cosine similarity search
1588
+ const scored = allChunks
1589
+ .filter(c => c.embedding)
1590
+ .map(c => ({
1591
+ ...c,
1592
+ similarityScore: this.cosineSimilarity(queryEmbedding, c.embedding!)
1593
+ }))
1594
+ .sort((a, b) => b.similarityScore - a.similarityScore)
1595
+ .slice(0, options.topK || 5);
1596
+
1597
+ // Step 5: Build context with size limits — keep context SHORT so model has room to generate
1598
+ const maxContextChars = contextWindow <= 2048 ? 800 : contextWindow <= 4096 ? 1500 : 3000;
1599
+ let contextLength = 0;
1600
+ const contextParts: string[] = [];
1601
+
1602
+ for (const c of scored) {
1603
+ const part = `[Source: ${c.documentName}]\n${c.content}`;
1604
+ if (contextLength + part.length <= maxContextChars) {
1605
+ contextParts.push(part);
1606
+ contextLength += part.length + 10; // Account for separator
1607
+ } else {
1608
+ break;
1609
+ }
1610
+ }
1611
+
1612
+ const context = contextParts.join('\n\n---\n\n');
1613
+ const prompt = `Use the following information to answer the question.\n\nInfo: ${context}\n\nQuestion: ${options.query}\nAnswer:`;
1614
+
1615
+ // Step 6: Generate locally
1616
+ const maxGen = contextWindow <= 2048 ? 150 : Math.min(300, Math.floor(contextWindow / 4));
1617
+ const response = await this.generate(options.modelId, prompt, {
1618
+ temperature: options.temperature || 0.6,
1619
+ maxTokens: options.maxTokens || maxGen,
1620
+ });
1621
+
1622
+ return {
1623
+ query: options.query,
1624
+ retrievedChunks: scored.map((c, i) => ({
1625
+ id: `local-${i}`,
1626
+ documentId: 'local',
1627
+ documentName: c.documentName,
1628
+ content: c.content,
1629
+ similarityScore: c.similarityScore,
1630
+ metadata: {}
1631
+ })),
1632
+ generatedResponse: response,
1633
+ context,
1634
+ latencyMs: Date.now() - startTime,
1635
+ tierUsed: 1,
1636
+ };
1637
+ } catch (error: any) {
1638
+ this.emitEvent('error', { stage: 'rag_local', error: error.message });
1639
+ throw new Error(`Local RAG failed: ${error.message}`);
1640
+ }
1641
+ }
1642
+
1643
+ /**
1644
+ * Tier 3: Offline RAG using a synced knowledge base.
1645
+ * First call syncKnowledgeBase(), then use this for offline queries.
1646
+ */
1647
+ async ragQueryOffline(options: RAGOptions): Promise<RAGResponse> {
1648
+ const startTime = Date.now();
1649
+
1650
+ const index = this.offlineIndexes.get(options.knowledgeBaseId);
1651
+ if (!index) {
1652
+ throw new Error(`Knowledge base "${options.knowledgeBaseId}" not synced. Call syncKnowledgeBase() first.`);
1653
+ }
1654
+
1655
+ // Check expiry
1656
+ if (new Date(index.metadata.expires_at) < new Date()) {
1657
+ throw new Error('Offline index has expired. Please re-sync.');
1658
+ }
1659
+
1660
+ try {
1661
+ // Load embedding model
1662
+ if (!this.localEmbeddingModel) {
1663
+ await this.loadEmbeddingModel();
1664
+ }
1665
+
1666
+ // Embed query
1667
+ const queryEmbedding = await this.embedTextLocal(options.query);
1668
+
1669
+ // Search offline index
1670
+ const scored = index.chunks
1671
+ .filter(c => c.embedding && c.embedding.length > 0)
1672
+ .map(c => ({
1673
+ ...c,
1674
+ similarityScore: this.cosineSimilarity(queryEmbedding, c.embedding!)
1675
+ }))
1676
+ .sort((a, b) => b.similarityScore - a.similarityScore)
1677
+ .slice(0, options.topK || 5);
1678
+
1679
+ // Build context with size limits — keep context SHORT so model has room to generate
1680
+ const contextWindow = this.modelContextWindow || 2048;
1681
+ const maxContextChars = contextWindow <= 2048 ? 800 : contextWindow <= 4096 ? 1500 : 3000;
1682
+ let contextLength = 0;
1683
+ const contextParts: string[] = [];
1684
+
1685
+ for (const c of scored) {
1686
+ const part = `[Source: ${c.document_name}]\n${c.content}`;
1687
+ if (contextLength + part.length <= maxContextChars) {
1688
+ contextParts.push(part);
1689
+ contextLength += part.length + 10;
1690
+ } else {
1691
+ break;
1692
+ }
1693
+ }
1694
+
1695
+ const context = contextParts.join('\n\n---\n\n');
1696
+ const prompt = `Use the following information to answer the question.\n\nInfo: ${context}\n\nQuestion: ${options.query}\nAnswer:`;
1697
+
1698
+ // Generate locally
1699
+ const maxGen = contextWindow <= 2048 ? 150 : Math.min(300, Math.floor(contextWindow / 4));
1700
+ const response = await this.generate(options.modelId, prompt, {
1701
+ temperature: options.temperature || 0.6,
1702
+ maxTokens: options.maxTokens || maxGen,
1703
+ });
1704
+
1705
+ return {
1706
+ query: options.query,
1707
+ retrievedChunks: scored.map(c => ({
1708
+ id: c.id,
1709
+ documentId: c.document_id,
1710
+ documentName: c.document_name,
1711
+ content: c.content,
1712
+ similarityScore: c.similarityScore,
1713
+ metadata: c.metadata
1714
+ })),
1715
+ generatedResponse: response,
1716
+ context,
1717
+ latencyMs: Date.now() - startTime,
1718
+ tierUsed: 3,
1719
+ };
1720
+ } catch (error: any) {
1721
+ this.emitEvent('error', { stage: 'rag_offline', error: error.message });
1722
+ throw new Error(`Offline RAG failed: ${error.message}`);
1723
+ }
1724
+ }
1725
+
1726
+ /**
1727
+ * Sync a knowledge base for offline use (Tier 3).
1728
+ * Downloads chunks + embeddings from server, stores locally.
1729
+ */
1730
+ async syncKnowledgeBase(knowledgeBaseId: string, deviceId?: string): Promise<{ chunkCount: number; sizeMb: number; expiresAt: string }> {
1731
+ try {
1732
+ if (!this.token) throw new Error('Not authenticated. Call init() first.');
1733
+
1734
+ const response = await axios.post(
1735
+ `${this.apiUrl}/api/rag/knowledge-bases/${knowledgeBaseId}/sync`,
1736
+ { device_id: deviceId || this.deviceId || 'sdk-device' },
1737
+ { headers: { Authorization: `Bearer ${this.token}` } }
1738
+ );
1739
+
1740
+ const { sync_package, chunk_count, package_size_mb, expires_at } = response.data;
1741
+ this.offlineIndexes.set(knowledgeBaseId, sync_package);
1742
+
1743
+ return {
1744
+ chunkCount: chunk_count,
1745
+ sizeMb: package_size_mb,
1746
+ expiresAt: expires_at
1747
+ };
1748
+ } catch (error: any) {
1749
+ throw new Error(`Sync failed: ${error.message}`);
1750
+ }
1751
+ }
1752
+
1753
+ // --- RAG Helper Methods ---
1754
+
1755
+ private async loadEmbeddingModel(): Promise<void> {
1756
+ this.emitProgress('downloading', 0, 'Loading embedding model (all-MiniLM-L6-v2)...');
1757
+ try {
1758
+ const { pipeline } = await import('@huggingface/transformers');
1759
+ this.localEmbeddingModel = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
1760
+ this.emitProgress('ready', 100, 'Embedding model loaded');
1761
+ } catch (error: any) {
1762
+ this.emitProgress('error', 0, `Embedding model failed: ${error.message}`);
1763
+ throw error;
1764
+ }
1765
+ }
1766
+
1767
+ private async embedTextLocal(text: string): Promise<number[]> {
1768
+ if (!this.localEmbeddingModel) throw new Error('Embedding model not loaded');
1769
+ const result = await this.localEmbeddingModel(text, { pooling: 'mean', normalize: true });
1770
+ // Handle different tensor output formats (v2 vs v3 of transformers)
1771
+ if (result.data) return Array.from(result.data);
1772
+ if (result.tolist) return result.tolist().flat();
1773
+ if (Array.isArray(result)) return result.flat();
1774
+ throw new Error('Unexpected embedding output format');
1775
+ }
1776
+
1777
+ private cosineSimilarity(a: number[], b: number[]): number {
1778
+ let dot = 0, normA = 0, normB = 0;
1779
+ for (let i = 0; i < a.length; i++) {
1780
+ dot += a[i] * b[i];
1781
+ normA += a[i] * a[i];
1782
+ normB += b[i] * b[i];
1783
+ }
1784
+ const denom = Math.sqrt(normA) * Math.sqrt(normB);
1785
+ return denom === 0 ? 0 : dot / denom;
1786
+ }
1787
+
1788
+ private chunkTextLocal(text: string, chunkSize: number = 512, overlap: number = 128): string[] {
1789
+ if (!text || text.length === 0) return [];
1790
+ if (overlap >= chunkSize) overlap = Math.floor(chunkSize * 0.25);
1791
+ const chunks: string[] = [];
1792
+ let start = 0;
1793
+ while (start < text.length) {
1794
+ let end = start + chunkSize;
1795
+ if (end < text.length) {
1796
+ const bp = Math.max(text.lastIndexOf('.', end), text.lastIndexOf('\n', end));
1797
+ if (bp > start + chunkSize / 2) end = bp + 1;
1798
+ }
1799
+ const chunk = text.slice(start, end).trim();
1800
+ if (chunk.length > 20) chunks.push(chunk);
1801
+ start = end - overlap;
1802
+ if (start >= text.length) break;
1803
+ }
1804
+ return chunks;
1805
+ }
1806
+
1807
+ // ── Static OpenAI Compatible Factory ────────────────────────────────
1808
+
1809
+ static openaiCompatible(config: { apiKey: string; apiUrl?: string; fallback?: FallbackConfig }): OpenAICompatibleClient {
1810
+ const instance = new SlyOS({
1811
+ apiKey: config.apiKey,
1812
+ apiUrl: config.apiUrl,
1813
+ fallback: { ...config.fallback, provider: config.fallback?.provider || 'openai' } as FallbackConfig,
1814
+ });
1815
+
1816
+ return {
1817
+ chat: {
1818
+ completions: {
1819
+ async create(request: OpenAIChatCompletionRequest & { model: string }): Promise<OpenAIChatCompletionResponse> {
1820
+ const { model, ...chatRequest } = request;
1821
+ return instance.chatCompletion(model, chatRequest);
1822
+ },
1823
+ },
1824
+ },
1825
+ };
1826
+ }
1827
+ }
1828
+
1829
// Default export: the SlyOS client class defined above.
export default SlyOS;

// Public type surface of the SDK. Type-only exports are erased at compile
// time, so consumers pay no runtime cost for importing them.
export type {
  SlyOSConfig,
  SlyOSConfigWithFallback,
  GenerateOptions,
  TranscribeOptions,
  DeviceProfile,
  ProgressEvent,
  SlyEvent,
  QuantizationLevel,
  ModelCategory,
  OpenAIMessage,
  OpenAIChatCompletionRequest,
  OpenAIChatCompletionResponse,
  OpenAIChoice,
  OpenAIUsage,
  BedrockTextGenerationConfig,
  BedrockInvokeRequest,
  BedrockInvokeResponse,
  BedrockResult,
  FallbackConfig,
  FallbackProvider,
  OpenAICompatibleClient,
  RAGOptions,
  RAGChunk,
  RAGResponse,
  OfflineIndex,
};