@dtelecom/agents-js 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/providers/deepgram-stt.ts","../../src/providers/openrouter-llm.ts","../../src/providers/cartesia-tts.ts"],"sourcesContent":["/**\n * DeepgramSTT — real-time streaming STT via Deepgram WebSocket API.\n *\n * Protocol:\n * - Connect to wss://api.deepgram.com/v1/listen?... with config as query params\n * - Auth via Authorization header: \"Token <apiKey>\"\n * - Send audio as binary WebSocket frames (PCM16 16kHz mono)\n * - Receive JSON: { type: \"Results\", channel: { alternatives: [{ transcript }] }, is_final, speech_final }\n * - Send KeepAlive every 5s when no audio is being sent\n * - Send CloseStream to gracefully shut down\n *\n * End-of-utterance strategy:\n * Buffer all is_final=true transcripts. Emit the buffered utterance as a\n * single final TranscriptionResult when speech_final=true OR UtteranceEnd\n * arrives. Interim results (is_final=false) are emitted immediately for\n * real-time feedback.\n */\n\nimport WebSocket from 'ws';\nimport { BaseSTTStream } from '../core/base-stt-stream';\nimport type { STTPlugin, STTStream, STTStreamOptions, TranscriptionResult } from '../core/types';\nimport { createLogger } from '../utils/logger';\n\nconst log = createLogger('DeepgramSTT');\n\nconst DEEPGRAM_WS_URL = 'wss://api.deepgram.com/v1/listen';\nconst KEEPALIVE_INTERVAL_MS = 5_000;\n\nexport interface DeepgramSTTOptions {\n apiKey: string;\n /** Deepgram model (default: 'nova-3') */\n model?: string;\n /** Language code (default: 'en') */\n language?: string;\n /** Enable interim results (default: true) */\n interimResults?: boolean;\n /** Enable punctuation (default: true) */\n punctuate?: boolean;\n /** Endpointing in ms (default: 300). Set to false to disable. */\n endpointing?: number | false;\n /** Keywords to boost recognition (e.g. ['dTelecom:5', 'WebRTC:3']) */\n keywords?: string[];\n /** Enable smart formatting (default: false) */\n smartFormat?: boolean;\n /** Utterance end timeout in ms (default: 1000). Requires interimResults. */\n utteranceEndMs?: number;\n}\n\nexport class DeepgramSTT implements STTPlugin {\n private readonly options: Required<Pick<DeepgramSTTOptions, 'apiKey'>> & DeepgramSTTOptions;\n\n constructor(options: DeepgramSTTOptions) {\n if (!options.apiKey) {\n throw new Error('DeepgramSTT requires an apiKey');\n }\n this.options = options;\n }\n\n createStream(options?: STTStreamOptions): STTStream {\n const language = options?.language ?? this.options.language ?? 
'en';\n return new DeepgramSTTStream(this.options, language);\n }\n}\n\nclass DeepgramSTTStream extends BaseSTTStream {\n private ws: WebSocket | null = null;\n private readonly apiKey: string;\n private readonly wsUrl: string;\n private _ready = false;\n private _closed = false;\n private pendingAudio: Buffer[] = [];\n private keepAliveTimer: ReturnType<typeof setInterval> | null = null;\n private lastAudioSentAt = 0;\n /** Buffer of is_final=true transcripts for the current utterance */\n private utteranceBuffer: string[] = [];\n /** Timestamp of the last non-empty interim result (approximates end of speech) */\n private lastInterimAt = 0;\n\n constructor(options: DeepgramSTTOptions, language: string) {\n super();\n this.apiKey = options.apiKey;\n this.wsUrl = buildWsUrl(options, language);\n this.connect();\n }\n\n sendAudio(pcm16: Buffer): void {\n if (this._closed) return;\n\n if (!this._ready) {\n this.pendingAudio.push(pcm16);\n return;\n }\n\n if (this.ws?.readyState === WebSocket.OPEN) {\n this.ws.send(pcm16);\n this.lastAudioSentAt = performance.now();\n }\n }\n\n async close(): Promise<void> {\n if (this._closed) return;\n this._closed = true;\n this._ready = false;\n this.pendingAudio = [];\n this.stopKeepAlive();\n\n if (this.ws?.readyState === WebSocket.OPEN) {\n // Graceful shutdown — ask server to flush remaining audio\n try {\n this.ws.send(JSON.stringify({ type: 'CloseStream' }));\n } catch {\n // Ignore send errors during shutdown\n }\n }\n\n if (this.ws) {\n this.ws.close();\n this.ws = null;\n }\n\n log.debug('DeepgramSTT stream closed');\n }\n\n private connect(): void {\n log.debug(`Connecting to Deepgram: ${this.wsUrl.replace(/token=[^&]+/, 'token=***')}`);\n\n this.ws = new WebSocket(this.wsUrl, {\n headers: {\n Authorization: `Token ${this.apiKey}`,\n },\n });\n\n this.ws.on('open', () => {\n log.info('Deepgram WebSocket connected');\n this._ready = true;\n\n // Flush pending audio\n for (const buf of this.pendingAudio) {\n if (this.ws?.readyState === WebSocket.OPEN) {\n this.ws.send(buf);\n }\n }\n this.pendingAudio = [];\n\n this.startKeepAlive();\n });\n\n this.ws.on('message', (data) => {\n try {\n const msg = JSON.parse(data.toString());\n this.handleMessage(msg);\n } catch (err) {\n log.error('Failed to parse Deepgram message:', err);\n }\n });\n\n this.ws.on('error', (err) => {\n log.error('Deepgram WebSocket error:', err);\n this.emit('error', err instanceof Error ? err : new Error(String(err)));\n });\n\n this.ws.on('close', (code, reason) => {\n log.debug(`Deepgram WebSocket closed: ${code} ${reason.toString()}`);\n this._ready = false;\n this.stopKeepAlive();\n\n // Reconnect if not intentionally closed\n if (!this._closed) {\n log.info('Deepgram connection lost, reconnecting in 1s...');\n setTimeout(() => {\n if (!this._closed) this.connect();\n }, 1000);\n }\n });\n }\n\n private handleMessage(msg: Record<string, unknown>): void {\n const type = msg.type as string;\n\n if (type === 'Results') {\n this.handleResults(msg);\n } else if (type === 'UtteranceEnd') {\n this.flushUtterance();\n } else if (type === 'Metadata') {\n log.debug('Deepgram session metadata received');\n } else if (type === 'SpeechStarted') {\n log.debug('Speech started detected');\n }\n }\n\n private handleResults(msg: Record<string, unknown>): void {\n const channel = msg.channel as { alternatives?: Array<{ transcript?: string; confidence?: number }> } | undefined;\n const transcript = channel?.alternatives?.[0]?.transcript ?? 
'';\n const confidence = channel?.alternatives?.[0]?.confidence;\n const isFinal = msg.is_final as boolean ?? false;\n const speechFinal = msg.speech_final as boolean ?? false;\n\n if (!transcript) return;\n\n if (!isFinal) {\n // Interim result — emit immediately for real-time feedback.\n // Include any buffered finals as prefix so the UI shows the full utterance.\n this.lastInterimAt = performance.now();\n const fullInterim = this.utteranceBuffer.length > 0\n ? this.utteranceBuffer.join(' ') + ' ' + transcript\n : transcript;\n this.emit('transcription', {\n text: fullInterim,\n isFinal: false,\n confidence: confidence ?? undefined,\n } satisfies TranscriptionResult);\n return;\n }\n\n // is_final=true — buffer this segment\n this.utteranceBuffer.push(transcript);\n\n if (speechFinal) {\n // End of utterance — emit the complete buffered transcript\n this.flushUtterance();\n }\n }\n\n /** Emit the buffered utterance as a single final transcription result. */\n private flushUtterance(): void {\n if (this.utteranceBuffer.length === 0) return;\n\n const now = performance.now();\n const fullText = this.utteranceBuffer.join(' ');\n this.utteranceBuffer = [];\n\n // sttDuration = time from last interim (≈ end of speech) to now (final result)\n // This includes endpointing delay + STT processing + network\n const sttDuration = this.lastInterimAt > 0 ? now - this.lastInterimAt : undefined;\n\n if (sttDuration !== undefined) {\n log.info(`stt_final: ${sttDuration.toFixed(0)}ms \"${fullText.slice(0, 50)}\"`);\n }\n\n this.lastInterimAt = 0;\n\n this.emit('transcription', {\n text: fullText,\n isFinal: true,\n sttDuration,\n } satisfies TranscriptionResult);\n }\n\n private startKeepAlive(): void {\n this.stopKeepAlive();\n this.keepAliveTimer = setInterval(() => {\n if (this.ws?.readyState === WebSocket.OPEN) {\n this.ws.send(JSON.stringify({ type: 'KeepAlive' }));\n }\n }, KEEPALIVE_INTERVAL_MS);\n }\n\n private stopKeepAlive(): void {\n if (this.keepAliveTimer) {\n clearInterval(this.keepAliveTimer);\n this.keepAliveTimer = null;\n }\n }\n}\n\n/** Build the Deepgram WebSocket URL with query parameters. */\nfunction buildWsUrl(options: DeepgramSTTOptions, language: string): string {\n const params = new URLSearchParams();\n\n params.set('model', options.model ?? 'nova-3');\n params.set('language', language);\n params.set('encoding', 'linear16');\n params.set('sample_rate', '16000');\n params.set('channels', '1');\n params.set('interim_results', String(options.interimResults ?? true));\n params.set('punctuate', String(options.punctuate ?? true));\n\n if (options.endpointing === false) {\n params.set('endpointing', 'false');\n } else {\n params.set('endpointing', String(options.endpointing ?? 
300));\n }\n\n if (options.smartFormat) {\n params.set('smart_format', 'true');\n }\n\n if (options.utteranceEndMs !== undefined) {\n params.set('utterance_end_ms', String(options.utteranceEndMs));\n } else if (options.interimResults !== false) {\n // Default utterance_end_ms when interim results are enabled\n params.set('utterance_end_ms', '1000');\n }\n\n if (options.keywords?.length) {\n for (const kw of options.keywords) {\n params.append('keywords', kw);\n }\n }\n\n return `${DEEPGRAM_WS_URL}?${params.toString()}`;\n}\n","/**\n * OpenRouterLLM — streaming LLM via OpenRouter (OpenAI-compatible API).\n *\n * Uses native fetch() with SSE parsing for streaming responses.\n * No SDK dependency — just HTTP.\n */\n\nimport type { LLMPlugin, LLMChunk, Message } from '../core/types';\nimport { createLogger } from '../utils/logger';\n\nconst log = createLogger('OpenRouterLLM');\n\nconst OPENROUTER_URL = 'https://openrouter.ai/api/v1/chat/completions';\n\nexport interface OpenRouterLLMOptions {\n apiKey: string;\n /** Model identifier (e.g. 'openai/gpt-4o', 'anthropic/claude-sonnet-4') */\n model: string;\n /** Max tokens in response (default: 512) */\n maxTokens?: number;\n /** Sampling temperature 0-2 (default: 0.7) */\n temperature?: number;\n /** OpenRouter provider routing preferences */\n providerRouting?: {\n /** Sort providers by metric (e.g. 'latency') */\n sort?: string;\n /** Pin to specific providers in order */\n order?: string[];\n /** Allow fallback to other providers if pinned ones fail */\n allowFallbacks?: boolean;\n };\n}\n\nexport class OpenRouterLLM implements LLMPlugin {\n private readonly apiKey: string;\n private readonly model: string;\n private readonly maxTokens: number;\n private readonly temperature: number;\n private readonly provider?: { sort?: string; order?: string[]; allow_fallbacks?: boolean };\n\n constructor(options: OpenRouterLLMOptions) {\n if (!options.apiKey) {\n throw new Error('OpenRouterLLM requires an apiKey');\n }\n this.apiKey = options.apiKey;\n this.model = options.model;\n this.maxTokens = options.maxTokens ?? 512;\n this.temperature = options.temperature ?? 
0.7;\n\n if (options.providerRouting) {\n this.provider = {\n sort: options.providerRouting.sort,\n order: options.providerRouting.order,\n allow_fallbacks: options.providerRouting.allowFallbacks,\n };\n }\n }\n\n /**\n * Warm up the LLM by sending the system prompt and a short message.\n * Primes the HTTP/TLS connection and model loading on the provider side.\n */\n async warmup(systemPrompt: string): Promise<void> {\n log.info('Warming up LLM connection...');\n const start = performance.now();\n\n const messages: Message[] = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: 'Hello' },\n ];\n\n try {\n const gen = this.chat(messages);\n for await (const chunk of gen) {\n if (chunk.type === 'done') break;\n }\n log.info(`LLM warmup complete in ${(performance.now() - start).toFixed(0)}ms`);\n } catch (err) {\n log.warn('LLM warmup failed (non-fatal):', err);\n }\n }\n\n async *chat(messages: Message[], signal?: AbortSignal): AsyncGenerator<LLMChunk> {\n const body: Record<string, unknown> = {\n model: this.model,\n messages,\n max_tokens: this.maxTokens,\n temperature: this.temperature,\n stream: true,\n };\n if (this.provider) {\n body.provider = this.provider;\n }\n\n log.debug(`LLM request: model=${this.model}, messages=${messages.length}`);\n\n const response = await fetch(OPENROUTER_URL, {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'Authorization': `Bearer ${this.apiKey}`,\n },\n body: JSON.stringify(body),\n signal,\n });\n\n if (!response.ok) {\n const errorText = await response.text();\n throw new Error(`OpenRouter API error ${response.status}: ${errorText}`);\n }\n\n if (!response.body) {\n throw new Error('OpenRouter response has no body');\n }\n\n // Parse SSE stream\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n // Check abort before blocking on read — prevents hanging when signal\n // was fired while we were yielding tokens to the pipeline\n if (signal?.aborted) break;\n\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() ?? 
'';\n\n for (const line of lines) {\n const trimmed = line.trim();\n if (!trimmed || !trimmed.startsWith('data: ')) continue;\n\n const data = trimmed.slice(6);\n if (data === '[DONE]') {\n yield { type: 'done' };\n return;\n }\n\n try {\n const parsed = JSON.parse(data);\n const choice = parsed.choices?.[0];\n if (!choice) continue;\n\n const delta = choice.delta;\n if (delta?.content) {\n yield { type: 'token', token: delta.content };\n }\n\n // Usage stats in the final chunk\n if (parsed.usage) {\n yield {\n type: 'done',\n usage: {\n promptTokens: parsed.usage.prompt_tokens,\n completionTokens: parsed.usage.completion_tokens,\n },\n };\n return;\n }\n } catch {\n // Skip malformed JSON chunks\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n\n yield { type: 'done' };\n }\n}\n","/**\n * CartesiaTTS — real-time streaming TTS via Cartesia WebSocket API.\n *\n * Protocol:\n * - Connect to wss://api.cartesia.ai/tts/websocket?api_key=...&cartesia_version=...\n * - Send JSON: { model_id, transcript, voice: { mode: \"id\", id }, output_format, context_id }\n * - Receive JSON: { type: \"chunk\", data: \"<base64 PCM>\" } — audio data\n * - Receive JSON: { type: \"done\", context_id } — synthesis complete\n * - Audio is base64-encoded PCM16 LE at the requested sample rate\n *\n * Uses a persistent WebSocket connection to avoid per-sentence handshake overhead.\n * Each synthesize() call uses a unique context_id for multiplexing.\n */\n\nimport WebSocket from 'ws';\nimport type { TTSPlugin } from '../core/types';\nimport { createLogger } from '../utils/logger';\n\nconst log = createLogger('CartesiaTTS');\n\nconst CARTESIA_WS_BASE = 'wss://api.cartesia.ai/tts/websocket';\nconst DEFAULT_API_VERSION = '2024-06-10';\nconst DEFAULT_MODEL = 'sonic-3';\n/** Pipeline operates at 48kHz — matches Opus/WebRTC native rate, no resampling */\nconst DEFAULT_SAMPLE_RATE = 48000;\n/** Reconnect after idle timeout (Cartesia closes after 5 min idle) */\nconst RECONNECT_DELAY_MS = 1000;\n\nexport interface CartesiaTTSOptions {\n apiKey: string;\n /** Cartesia voice ID */\n voiceId: string;\n /** Model ID (default: 'sonic-3') */\n modelId?: string;\n /** Output sample rate in Hz (default: 16000) */\n sampleRate?: number;\n /** API version (default: '2024-06-10') */\n apiVersion?: string;\n /** Language code (default: 'en') */\n language?: string;\n /** Speech speed multiplier, 0.6-1.5 (default: 1.0). Sonic-3 only. */\n speed?: number;\n /** Emotion string (e.g. 'friendly', 'calm'). Sonic-3 only. */\n emotion?: string;\n}\n\n/** Per-context state for tracking an in-flight synthesis. 
*/\ninterface ContextState {\n chunks: Buffer[];\n done: boolean;\n error: Error | null;\n wake: (() => void) | null;\n}\n\nexport class CartesiaTTS implements TTSPlugin {\n private readonly apiKey: string;\n private readonly voiceId: string;\n private readonly modelId: string;\n private readonly sampleRate: number;\n private readonly apiVersion: string;\n private readonly language?: string;\n private readonly speed: number | undefined;\n private readonly emotion: string | undefined;\n\n private ws: WebSocket | null = null;\n private _connected = false;\n private connectPromise: Promise<void> | null = null;\n /** Active contexts keyed by context_id */\n private contexts = new Map<string, ContextState>();\n private contextCounter = 0;\n\n constructor(options: CartesiaTTSOptions) {\n if (!options.apiKey) {\n throw new Error('CartesiaTTS requires an apiKey');\n }\n if (!options.voiceId) {\n throw new Error('CartesiaTTS requires a voiceId');\n }\n this.apiKey = options.apiKey;\n this.voiceId = options.voiceId;\n this.modelId = options.modelId ?? DEFAULT_MODEL;\n this.sampleRate = options.sampleRate ?? DEFAULT_SAMPLE_RATE;\n this.apiVersion = options.apiVersion ?? DEFAULT_API_VERSION;\n this.language = options.language;\n this.speed = options.speed;\n this.emotion = options.emotion;\n }\n\n /** Pre-connect the WebSocket so first synthesize() doesn't pay connection cost. */\n async warmup(): Promise<void> {\n log.info('Warming up TTS connection...');\n const start = performance.now();\n try {\n await this.ensureConnection();\n log.info(`TTS warmup complete in ${(performance.now() - start).toFixed(0)}ms`);\n } catch (err) {\n log.warn('TTS warmup failed (non-fatal):', err);\n }\n }\n\n async *synthesize(text: string, signal?: AbortSignal): AsyncGenerator<Buffer> {\n log.debug(`Synthesizing: \"${text.slice(0, 60)}\"`);\n\n await this.ensureConnection();\n\n if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {\n throw new Error('Cartesia WebSocket not connected');\n }\n\n const contextId = `ctx-${++this.contextCounter}-${Date.now()}`;\n const ctx: ContextState = { chunks: [], done: false, error: null, wake: null };\n this.contexts.set(contextId, ctx);\n\n // Build request\n const request: Record<string, unknown> = {\n model_id: this.modelId,\n transcript: text,\n voice: { mode: 'id', id: this.voiceId },\n output_format: {\n container: 'raw',\n encoding: 'pcm_s16le',\n sample_rate: this.sampleRate,\n },\n context_id: contextId,\n continue: false,\n };\n\n if (this.language) {\n request.language = this.language;\n }\n\n // Sonic-3 generation config\n if (this.speed !== undefined || this.emotion !== undefined) {\n const genConfig: Record<string, unknown> = {};\n if (this.speed !== undefined) genConfig.speed = this.speed;\n if (this.emotion !== undefined) genConfig.emotion = this.emotion;\n request.generation_config = genConfig;\n }\n\n // Handle abort — cancel the context on the server\n const onAbort = () => {\n ctx.done = true;\n ctx.wake?.();\n // Send cancel to server so it stops generating\n if (this.ws?.readyState === WebSocket.OPEN) {\n try {\n this.ws.send(JSON.stringify({ context_id: contextId, cancel: true }));\n } catch {\n // Ignore send errors during cancellation\n }\n }\n };\n signal?.addEventListener('abort', onAbort, { once: true });\n\n // Send synthesis request\n this.ws.send(JSON.stringify(request));\n\n // Yield audio chunks as they arrive\n try {\n while (true) {\n if (signal?.aborted) break;\n if (ctx.error) throw ctx.error;\n\n if (ctx.chunks.length > 0) {\n yield 
ctx.chunks.shift()!;\n continue;\n }\n\n if (ctx.done) break;\n\n // Wait for next chunk or done signal\n await new Promise<void>((resolve) => {\n ctx.wake = resolve;\n });\n ctx.wake = null;\n }\n\n // Drain remaining chunks\n while (ctx.chunks.length > 0) {\n yield ctx.chunks.shift()!;\n }\n } finally {\n signal?.removeEventListener('abort', onAbort);\n this.contexts.delete(contextId);\n }\n }\n\n /** Ensure the persistent WebSocket is connected. */\n private ensureConnection(): Promise<void> {\n if (this._connected && this.ws?.readyState === WebSocket.OPEN) {\n return Promise.resolve();\n }\n\n // Deduplicate concurrent connection attempts\n if (this.connectPromise) return this.connectPromise;\n\n this.connectPromise = new Promise<void>((resolve, reject) => {\n const url = `${CARTESIA_WS_BASE}?api_key=${this.apiKey}&cartesia_version=${this.apiVersion}`;\n log.debug('Connecting to Cartesia...');\n\n this.ws = new WebSocket(url);\n\n this.ws.on('open', () => {\n this._connected = true;\n this.connectPromise = null;\n log.info('Cartesia WebSocket connected');\n resolve();\n });\n\n this.ws.on('message', (data) => {\n try {\n const msg = JSON.parse(data.toString());\n this.handleMessage(msg);\n } catch (err) {\n log.error('Failed to parse Cartesia message:', err);\n }\n });\n\n this.ws.on('error', (err) => {\n const error = err instanceof Error ? err : new Error(String(err));\n log.error('Cartesia WebSocket error:', error);\n // Propagate error to all active contexts\n for (const ctx of this.contexts.values()) {\n ctx.error = error;\n ctx.wake?.();\n }\n this._connected = false;\n this.connectPromise = null;\n reject(error);\n });\n\n this.ws.on('close', (code, reason) => {\n log.debug(`Cartesia WebSocket closed: ${code} ${reason.toString()}`);\n this._connected = false;\n this.connectPromise = null;\n // Mark all active contexts as done\n for (const ctx of this.contexts.values()) {\n ctx.done = true;\n ctx.wake?.();\n }\n });\n });\n\n return this.connectPromise;\n }\n\n private handleMessage(msg: Record<string, unknown>): void {\n const contextId = msg.context_id as string | undefined;\n if (!contextId) return;\n\n const ctx = this.contexts.get(contextId);\n if (!ctx) return; // Stale context — already cleaned up\n\n const type = msg.type as string;\n\n if (type === 'chunk') {\n const b64 = msg.data as string;\n if (b64) {\n const pcm = Buffer.from(b64, 'base64');\n ctx.chunks.push(pcm);\n ctx.wake?.();\n }\n } else if (type === 'done') {\n log.debug(`Cartesia synthesis done for ${contextId} (${ctx.chunks.length} chunks pending)`);\n ctx.done = true;\n ctx.wake?.();\n } else if (type === 'error') {\n const errorMsg = msg.error as string ?? 
'Unknown Cartesia error';\n log.error(`Cartesia error for ${contextId}: ${errorMsg}`);\n ctx.error = new Error(`Cartesia TTS error: ${errorMsg}`);\n ctx.wake?.();\n }\n }\n}\n"],"mappings":";;;;;;;;AAkBA,OAAO,eAAe;AAKtB,IAAM,MAAM,aAAa,aAAa;AAEtC,IAAM,kBAAkB;AACxB,IAAM,wBAAwB;AAsBvB,IAAM,cAAN,MAAuC;AAAA,EAC3B;AAAA,EAEjB,YAAY,SAA6B;AACvC,QAAI,CAAC,QAAQ,QAAQ;AACnB,YAAM,IAAI,MAAM,gCAAgC;AAAA,IAClD;AACA,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,aAAa,SAAuC;AAClD,UAAM,WAAW,SAAS,YAAY,KAAK,QAAQ,YAAY;AAC/D,WAAO,IAAI,kBAAkB,KAAK,SAAS,QAAQ;AAAA,EACrD;AACF;AAEA,IAAM,oBAAN,cAAgC,cAAc;AAAA,EACpC,KAAuB;AAAA,EACd;AAAA,EACA;AAAA,EACT,SAAS;AAAA,EACT,UAAU;AAAA,EACV,eAAyB,CAAC;AAAA,EAC1B,iBAAwD;AAAA,EACxD,kBAAkB;AAAA;AAAA,EAElB,kBAA4B,CAAC;AAAA;AAAA,EAE7B,gBAAgB;AAAA,EAExB,YAAY,SAA6B,UAAkB;AACzD,UAAM;AACN,SAAK,SAAS,QAAQ;AACtB,SAAK,QAAQ,WAAW,SAAS,QAAQ;AACzC,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,UAAU,OAAqB;AAC7B,QAAI,KAAK,QAAS;AAElB,QAAI,CAAC,KAAK,QAAQ;AAChB,WAAK,aAAa,KAAK,KAAK;AAC5B;AAAA,IACF;AAEA,QAAI,KAAK,IAAI,eAAe,UAAU,MAAM;AAC1C,WAAK,GAAG,KAAK,KAAK;AAClB,WAAK,kBAAkB,YAAY,IAAI;AAAA,IACzC;AAAA,EACF;AAAA,EAEA,MAAM,QAAuB;AAC3B,QAAI,KAAK,QAAS;AAClB,SAAK,UAAU;AACf,SAAK,SAAS;AACd,SAAK,eAAe,CAAC;AACrB,SAAK,cAAc;AAEnB,QAAI,KAAK,IAAI,eAAe,UAAU,MAAM;AAE1C,UAAI;AACF,aAAK,GAAG,KAAK,KAAK,UAAU,EAAE,MAAM,cAAc,CAAC,CAAC;AAAA,MACtD,QAAQ;AAAA,MAER;AAAA,IACF;AAEA,QAAI,KAAK,IAAI;AACX,WAAK,GAAG,MAAM;AACd,WAAK,KAAK;AAAA,IACZ;AAEA,QAAI,MAAM,2BAA2B;AAAA,EACvC;AAAA,EAEQ,UAAgB;AACtB,QAAI,MAAM,2BAA2B,KAAK,MAAM,QAAQ,eAAe,WAAW,CAAC,EAAE;AAErF,SAAK,KAAK,IAAI,UAAU,KAAK,OAAO;AAAA,MAClC,SAAS;AAAA,QACP,eAAe,SAAS,KAAK,MAAM;AAAA,MACrC;AAAA,IACF,CAAC;AAED,SAAK,GAAG,GAAG,QAAQ,MAAM;AACvB,UAAI,KAAK,8BAA8B;AACvC,WAAK,SAAS;AAGd,iBAAW,OAAO,KAAK,cAAc;AACnC,YAAI,KAAK,IAAI,eAAe,UAAU,MAAM;AAC1C,eAAK,GAAG,KAAK,GAAG;AAAA,QAClB;AAAA,MACF;AACA,WAAK,eAAe,CAAC;AAErB,WAAK,eAAe;AAAA,IACtB,CAAC;AAED,SAAK,GAAG,GAAG,WAAW,CAAC,SAAS;AAC9B,UAAI;AACF,cAAM,MAAM,KAAK,MAAM,KAAK,SAAS,CAAC;AACtC,aAAK,cAAc,GAAG;AAAA,MACxB,SAAS,KAAK;AACZ,YAAI,MAAM,qCAAqC,GAAG;AAAA,MACpD;AAAA,IACF,CAAC;AAED,SAAK,GAAG,GAAG,SAAS,CAAC,QAAQ;AAC3B,UAAI,MAAM,6BAA6B,GAAG;AAC1C,WAAK,KAAK,SAAS,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC,CAAC;AAAA,IACxE,CAAC;AAED,SAAK,GAAG,GAAG,SAAS,CAAC,MAAM,WAAW;AACpC,UAAI,MAAM,8BAA8B,IAAI,IAAI,OAAO,SAAS,CAAC,EAAE;AACnE,WAAK,SAAS;AACd,WAAK,cAAc;AAGnB,UAAI,CAAC,KAAK,SAAS;AACjB,YAAI,KAAK,iDAAiD;AAC1D,mBAAW,MAAM;AACf,cAAI,CAAC,KAAK,QAAS,MAAK,QAAQ;AAAA,QAClC,GAAG,GAAI;AAAA,MACT;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,cAAc,KAAoC;AACxD,UAAM,OAAO,IAAI;AAEjB,QAAI,SAAS,WAAW;AACtB,WAAK,cAAc,GAAG;AAAA,IACxB,WAAW,SAAS,gBAAgB;AAClC,WAAK,eAAe;AAAA,IACtB,WAAW,SAAS,YAAY;AAC9B,UAAI,MAAM,oCAAoC;AAAA,IAChD,WAAW,SAAS,iBAAiB;AACnC,UAAI,MAAM,yBAAyB;AAAA,IACrC;AAAA,EACF;AAAA,EAEQ,cAAc,KAAoC;AACxD,UAAM,UAAU,IAAI;AACpB,UAAM,aAAa,SAAS,eAAe,CAAC,GAAG,cAAc;AAC7D,UAAM,aAAa,SAAS,eAAe,CAAC,GAAG;AAC/C,UAAM,UAAU,IAAI,YAAuB;AAC3C,UAAM,cAAc,IAAI,gBAA2B;AAEnD,QAAI,CAAC,WAAY;AAEjB,QAAI,CAAC,SAAS;AAGZ,WAAK,gBAAgB,YAAY,IAAI;AACrC,YAAM,cAAc,KAAK,gBAAgB,SAAS,IAC9C,KAAK,gBAAgB,KAAK,GAAG,IAAI,MAAM,aACvC;AACJ,WAAK,KAAK,iBAAiB;AAAA,QACzB,MAAM;AAAA,QACN,SAAS;AAAA,QACT,YAAY,cAAc;AAAA,MAC5B,CAA+B;AAC/B;AAAA,IACF;AAGA,SAAK,gBAAgB,KAAK,UAAU;AAEpC,QAAI,aAAa;AAEf,WAAK,eAAe;AAAA,IACtB;AAAA,EACF;AAAA;AAAA,EAGQ,iBAAuB;AAC7B,QAAI,KAAK,gBAAgB,WAAW,EAAG;AAEvC,UAAM,MAAM,YAAY,IAAI;AAC5B,UAAM,WAAW,KAAK,gBAAgB,KAAK,GAAG;AAC9C,SAAK,kBAAkB,CAAC;AAIxB,UAAM,cAAc,KAAK,gBAAgB,IAAI,MAAM,KAAK,gBAAgB;AAExE,QAAI,gBAAgB,QAAW;AAC7B,UAAI,KAAK,cAAc,YAAY,QAAQ,CAAC,CAAC,OAAO,SAAS,MAAM,GAAG,EAAE,CAAC,GAAG;AAAA,IAC9E;AAEA,SAAK,gBAAgB;AAErB,SAAK,KAAK,iBAAiB;AAAA,MACzB,MAAM;AA
AA,MACN,SAAS;AAAA,MACT;AAAA,IACF,CAA+B;AAAA,EACjC;AAAA,EAEQ,iBAAuB;AAC7B,SAAK,cAAc;AACnB,SAAK,iBAAiB,YAAY,MAAM;AACtC,UAAI,KAAK,IAAI,eAAe,UAAU,MAAM;AAC1C,aAAK,GAAG,KAAK,KAAK,UAAU,EAAE,MAAM,YAAY,CAAC,CAAC;AAAA,MACpD;AAAA,IACF,GAAG,qBAAqB;AAAA,EAC1B;AAAA,EAEQ,gBAAsB;AAC5B,QAAI,KAAK,gBAAgB;AACvB,oBAAc,KAAK,cAAc;AACjC,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AACF;AAGA,SAAS,WAAW,SAA6B,UAA0B;AACzE,QAAM,SAAS,IAAI,gBAAgB;AAEnC,SAAO,IAAI,SAAS,QAAQ,SAAS,QAAQ;AAC7C,SAAO,IAAI,YAAY,QAAQ;AAC/B,SAAO,IAAI,YAAY,UAAU;AACjC,SAAO,IAAI,eAAe,OAAO;AACjC,SAAO,IAAI,YAAY,GAAG;AAC1B,SAAO,IAAI,mBAAmB,OAAO,QAAQ,kBAAkB,IAAI,CAAC;AACpE,SAAO,IAAI,aAAa,OAAO,QAAQ,aAAa,IAAI,CAAC;AAEzD,MAAI,QAAQ,gBAAgB,OAAO;AACjC,WAAO,IAAI,eAAe,OAAO;AAAA,EACnC,OAAO;AACL,WAAO,IAAI,eAAe,OAAO,QAAQ,eAAe,GAAG,CAAC;AAAA,EAC9D;AAEA,MAAI,QAAQ,aAAa;AACvB,WAAO,IAAI,gBAAgB,MAAM;AAAA,EACnC;AAEA,MAAI,QAAQ,mBAAmB,QAAW;AACxC,WAAO,IAAI,oBAAoB,OAAO,QAAQ,cAAc,CAAC;AAAA,EAC/D,WAAW,QAAQ,mBAAmB,OAAO;AAE3C,WAAO,IAAI,oBAAoB,MAAM;AAAA,EACvC;AAEA,MAAI,QAAQ,UAAU,QAAQ;AAC5B,eAAW,MAAM,QAAQ,UAAU;AACjC,aAAO,OAAO,YAAY,EAAE;AAAA,IAC9B;AAAA,EACF;AAEA,SAAO,GAAG,eAAe,IAAI,OAAO,SAAS,CAAC;AAChD;;;ACnSA,IAAMA,OAAM,aAAa,eAAe;AAExC,IAAM,iBAAiB;AAqBhB,IAAM,gBAAN,MAAyC;AAAA,EAC7B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,SAA+B;AACzC,QAAI,CAAC,QAAQ,QAAQ;AACnB,YAAM,IAAI,MAAM,kCAAkC;AAAA,IACpD;AACA,SAAK,SAAS,QAAQ;AACtB,SAAK,QAAQ,QAAQ;AACrB,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,cAAc,QAAQ,eAAe;AAE1C,QAAI,QAAQ,iBAAiB;AAC3B,WAAK,WAAW;AAAA,QACd,MAAM,QAAQ,gBAAgB;AAAA,QAC9B,OAAO,QAAQ,gBAAgB;AAAA,QAC/B,iBAAiB,QAAQ,gBAAgB;AAAA,MAC3C;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,OAAO,cAAqC;AAChD,IAAAA,KAAI,KAAK,8BAA8B;AACvC,UAAM,QAAQ,YAAY,IAAI;AAE9B,UAAM,WAAsB;AAAA,MAC1B,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,MACxC,EAAE,MAAM,QAAQ,SAAS,QAAQ;AAAA,IACnC;AAEA,QAAI;AACF,YAAM,MAAM,KAAK,KAAK,QAAQ;AAC9B,uBAAiB,SAAS,KAAK;AAC7B,YAAI,MAAM,SAAS,OAAQ;AAAA,MAC7B;AACA,MAAAA,KAAI,KAAK,2BAA2B,YAAY,IAAI,IAAI,OAAO,QAAQ,CAAC,CAAC,IAAI;AAAA,IAC/E,SAAS,KAAK;AACZ,MAAAA,KAAI,KAAK,kCAAkC,GAAG;AAAA,IAChD;AAAA,EACF;AAAA,EAEA,OAAO,KAAK,UAAqB,QAAgD;AAC/E,UAAM,OAAgC;AAAA,MACpC,OAAO,KAAK;AAAA,MACZ;AAAA,MACA,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,QAAQ;AAAA,IACV;AACA,QAAI,KAAK,UAAU;AACjB,WAAK,WAAW,KAAK;AAAA,IACvB;AAEA,IAAAA,KAAI,MAAM,sBAAsB,KAAK,KAAK,cAAc,SAAS,MAAM,EAAE;AAEzE,UAAM,WAAW,MAAM,MAAM,gBAAgB;AAAA,MAC3C,QAAQ;AAAA,MACR,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,iBAAiB,UAAU,KAAK,MAAM;AAAA,MACxC;AAAA,MACA,MAAM,KAAK,UAAU,IAAI;AAAA,MACzB;AAAA,IACF,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,YAAY,MAAM,SAAS,KAAK;AACtC,YAAM,IAAI,MAAM,wBAAwB,SAAS,MAAM,KAAK,SAAS,EAAE;AAAA,IACzE;AAEA,QAAI,CAAC,SAAS,MAAM;AAClB,YAAM,IAAI,MAAM,iCAAiC;AAAA,IACnD;AAGA,UAAM,SAAS,SAAS,KAAK,UAAU;AACvC,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AAEb,QAAI;AACF,aAAO,MAAM;AAGX,YAAI,QAAQ,QAAS;AAErB,cAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,YAAI,KAAM;AAEV,kBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,cAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,iBAAS,MAAM,IAAI,KAAK;AAExB,mBAAW,QAAQ,OAAO;AACxB,gBAAM,UAAU,KAAK,KAAK;AAC1B,cAAI,CAAC,WAAW,CAAC,QAAQ,WAAW,QAAQ,EAAG;AAE/C,gBAAM,OAAO,QAAQ,MAAM,CAAC;AAC5B,cAAI,SAAS,UAAU;AACrB,kBAAM,EAAE,MAAM,OAAO;AACrB;AAAA,UACF;AAEA,cAAI;AACF,kBAAM,SAAS,KAAK,MAAM,IAAI;AAC9B,kBAAM,SAAS,OAAO,UAAU,CAAC;AACjC,gBAAI,CAAC,OAAQ;AAEb,kBAAM,QAAQ,OAAO;AACrB,gBAAI,OAAO,SAAS;AAClB,oBAAM,EAAE,MAAM,SAAS,OAAO,MAAM,QAAQ;AAAA,YAC9C;AAGA,gBAAI,OAAO,OAAO;AAChB,oBAAM;AAAA,gBACJ,MAAM;AAAA,gBACN,OAAO;AAAA,kBACL,cAAc,OAAO,MAAM;AAAA,kBAC3B,kBAAkB,OAAO,MAAM;AAAA,gBACjC;AAAA,cACF;AACA;AAAA,YACF;AAAA,UACF,QAAQ;AAAA,UAER;AAAA,QACF;AAAA,MACF;AAAA,IACF,UAAE;AACA,aAAO,YAAY;AAAA,IACrB;AAEA,UAAM
,EAAE,MAAM,OAAO;AAAA,EACvB;AACF;;;ACjKA,OAAOC,gBAAe;AAItB,IAAMC,OAAM,aAAa,aAAa;AAEtC,IAAM,mBAAmB;AACzB,IAAM,sBAAsB;AAC5B,IAAM,gBAAgB;AAEtB,IAAM,sBAAsB;AA8BrB,IAAM,cAAN,MAAuC;AAAA,EAC3B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,KAAuB;AAAA,EACvB,aAAa;AAAA,EACb,iBAAuC;AAAA;AAAA,EAEvC,WAAW,oBAAI,IAA0B;AAAA,EACzC,iBAAiB;AAAA,EAEzB,YAAY,SAA6B;AACvC,QAAI,CAAC,QAAQ,QAAQ;AACnB,YAAM,IAAI,MAAM,gCAAgC;AAAA,IAClD;AACA,QAAI,CAAC,QAAQ,SAAS;AACpB,YAAM,IAAI,MAAM,gCAAgC;AAAA,IAClD;AACA,SAAK,SAAS,QAAQ;AACtB,SAAK,UAAU,QAAQ;AACvB,SAAK,UAAU,QAAQ,WAAW;AAClC,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,WAAW,QAAQ;AACxB,SAAK,QAAQ,QAAQ;AACrB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,IAAAC,KAAI,KAAK,8BAA8B;AACvC,UAAM,QAAQ,YAAY,IAAI;AAC9B,QAAI;AACF,YAAM,KAAK,iBAAiB;AAC5B,MAAAA,KAAI,KAAK,2BAA2B,YAAY,IAAI,IAAI,OAAO,QAAQ,CAAC,CAAC,IAAI;AAAA,IAC/E,SAAS,KAAK;AACZ,MAAAA,KAAI,KAAK,kCAAkC,GAAG;AAAA,IAChD;AAAA,EACF;AAAA,EAEA,OAAO,WAAW,MAAc,QAA8C;AAC5E,IAAAA,KAAI,MAAM,kBAAkB,KAAK,MAAM,GAAG,EAAE,CAAC,GAAG;AAEhD,UAAM,KAAK,iBAAiB;AAE5B,QAAI,CAAC,KAAK,MAAM,KAAK,GAAG,eAAeC,WAAU,MAAM;AACrD,YAAM,IAAI,MAAM,kCAAkC;AAAA,IACpD;AAEA,UAAM,YAAY,OAAO,EAAE,KAAK,cAAc,IAAI,KAAK,IAAI,CAAC;AAC5D,UAAM,MAAoB,EAAE,QAAQ,CAAC,GAAG,MAAM,OAAO,OAAO,MAAM,MAAM,KAAK;AAC7E,SAAK,SAAS,IAAI,WAAW,GAAG;AAGhC,UAAM,UAAmC;AAAA,MACvC,UAAU,KAAK;AAAA,MACf,YAAY;AAAA,MACZ,OAAO,EAAE,MAAM,MAAM,IAAI,KAAK,QAAQ;AAAA,MACtC,eAAe;AAAA,QACb,WAAW;AAAA,QACX,UAAU;AAAA,QACV,aAAa,KAAK;AAAA,MACpB;AAAA,MACA,YAAY;AAAA,MACZ,UAAU;AAAA,IACZ;AAEA,QAAI,KAAK,UAAU;AACjB,cAAQ,WAAW,KAAK;AAAA,IAC1B;AAGA,QAAI,KAAK,UAAU,UAAa,KAAK,YAAY,QAAW;AAC1D,YAAM,YAAqC,CAAC;AAC5C,UAAI,KAAK,UAAU,OAAW,WAAU,QAAQ,KAAK;AACrD,UAAI,KAAK,YAAY,OAAW,WAAU,UAAU,KAAK;AACzD,cAAQ,oBAAoB;AAAA,IAC9B;AAGA,UAAM,UAAU,MAAM;AACpB,UAAI,OAAO;AACX,UAAI,OAAO;AAEX,UAAI,KAAK,IAAI,eAAeA,WAAU,MAAM;AAC1C,YAAI;AACF,eAAK,GAAG,KAAK,KAAK,UAAU,EAAE,YAAY,WAAW,QAAQ,KAAK,CAAC,CAAC;AAAA,QACtE,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AACA,YAAQ,iBAAiB,SAAS,SAAS,EAAE,MAAM,KAAK,CAAC;AAGzD,SAAK,GAAG,KAAK,KAAK,UAAU,OAAO,CAAC;AAGpC,QAAI;AACF,aAAO,MAAM;AACX,YAAI,QAAQ,QAAS;AACrB,YAAI,IAAI,MAAO,OAAM,IAAI;AAEzB,YAAI,IAAI,OAAO,SAAS,GAAG;AACzB,gBAAM,IAAI,OAAO,MAAM;AACvB;AAAA,QACF;AAEA,YAAI,IAAI,KAAM;AAGd,cAAM,IAAI,QAAc,CAAC,YAAY;AACnC,cAAI,OAAO;AAAA,QACb,CAAC;AACD,YAAI,OAAO;AAAA,MACb;AAGA,aAAO,IAAI,OAAO,SAAS,GAAG;AAC5B,cAAM,IAAI,OAAO,MAAM;AAAA,MACzB;AAAA,IACF,UAAE;AACA,cAAQ,oBAAoB,SAAS,OAAO;AAC5C,WAAK,SAAS,OAAO,SAAS;AAAA,IAChC;AAAA,EACF;AAAA;AAAA,EAGQ,mBAAkC;AACxC,QAAI,KAAK,cAAc,KAAK,IAAI,eAAeA,WAAU,MAAM;AAC7D,aAAO,QAAQ,QAAQ;AAAA,IACzB;AAGA,QAAI,KAAK,eAAgB,QAAO,KAAK;AAErC,SAAK,iBAAiB,IAAI,QAAc,CAAC,SAAS,WAAW;AAC3D,YAAM,MAAM,GAAG,gBAAgB,YAAY,KAAK,MAAM,qBAAqB,KAAK,UAAU;AAC1F,MAAAD,KAAI,MAAM,2BAA2B;AAErC,WAAK,KAAK,IAAIC,WAAU,GAAG;AAE3B,WAAK,GAAG,GAAG,QAAQ,MAAM;AACvB,aAAK,aAAa;AAClB,aAAK,iBAAiB;AACtB,QAAAD,KAAI,KAAK,8BAA8B;AACvC,gBAAQ;AAAA,MACV,CAAC;AAED,WAAK,GAAG,GAAG,WAAW,CAAC,SAAS;AAC9B,YAAI;AACF,gBAAM,MAAM,KAAK,MAAM,KAAK,SAAS,CAAC;AACtC,eAAK,cAAc,GAAG;AAAA,QACxB,SAAS,KAAK;AACZ,UAAAA,KAAI,MAAM,qCAAqC,GAAG;AAAA,QACpD;AAAA,MACF,CAAC;AAED,WAAK,GAAG,GAAG,SAAS,CAAC,QAAQ;AAC3B,cAAM,QAAQ,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC;AAChE,QAAAA,KAAI,MAAM,6BAA6B,KAAK;AAE5C,mBAAW,OAAO,KAAK,SAAS,OAAO,GAAG;AACxC,cAAI,QAAQ;AACZ,cAAI,OAAO;AAAA,QACb;AACA,aAAK,aAAa;AAClB,aAAK,iBAAiB;AACtB,eAAO,KAAK;AAAA,MACd,CAAC;AAED,WAAK,GAAG,GAAG,SAAS,CAAC,MAAM,WAAW;AACpC,QAAAA,KAAI,MAAM,8BAA8B,IAAI,IAAI,OAAO,SAAS,CAAC,EAAE;AACnE,aAAK,aAAa;AAClB,aAAK,iBAAiB;AAEtB,mBAAW,OAAO,KAAK,SAAS,OAAO,GAAG;AACxC,cAAI,OAAO;AACX,cAAI,OAAO;AAAA,QACb;AAAA,MAC
F,CAAC;AAAA,IACH,CAAC;AAED,WAAO,KAAK;AAAA,EACd;AAAA,EAEQ,cAAc,KAAoC;AACxD,UAAM,YAAY,IAAI;AACtB,QAAI,CAAC,UAAW;AAEhB,UAAM,MAAM,KAAK,SAAS,IAAI,SAAS;AACvC,QAAI,CAAC,IAAK;AAEV,UAAM,OAAO,IAAI;AAEjB,QAAI,SAAS,SAAS;AACpB,YAAM,MAAM,IAAI;AAChB,UAAI,KAAK;AACP,cAAM,MAAM,OAAO,KAAK,KAAK,QAAQ;AACrC,YAAI,OAAO,KAAK,GAAG;AACnB,YAAI,OAAO;AAAA,MACb;AAAA,IACF,WAAW,SAAS,QAAQ;AAC1B,MAAAA,KAAI,MAAM,+BAA+B,SAAS,KAAK,IAAI,OAAO,MAAM,kBAAkB;AAC1F,UAAI,OAAO;AACX,UAAI,OAAO;AAAA,IACb,WAAW,SAAS,SAAS;AAC3B,YAAM,WAAW,IAAI,SAAmB;AACxC,MAAAA,KAAI,MAAM,sBAAsB,SAAS,KAAK,QAAQ,EAAE;AACxD,UAAI,QAAQ,IAAI,MAAM,uBAAuB,QAAQ,EAAE;AACvD,UAAI,OAAO;AAAA,IACb;AAAA,EACF;AACF;","names":["log","WebSocket","log","log","WebSocket"]}
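The sources embedded in the map above document each provider's constructor options and warmup hooks. As a non-authoritative sketch (not part of this diff), wiring the three providers together might look like the following; the root import specifier, the environment-variable names, and the voice ID are assumptions.

```ts
// Sketch only: assumes these classes are re-exported from the package root and
// that the option shapes match DeepgramSTTOptions / OpenRouterLLMOptions /
// CartesiaTTSOptions as shown in the embedded sources above.
import { DeepgramSTT, OpenRouterLLM, CartesiaTTS } from '@dtelecom/agents-js';

async function buildProviders() {
  const stt = new DeepgramSTT({
    apiKey: process.env.DEEPGRAM_API_KEY ?? '', // constructor throws if empty
    model: 'nova-3',        // default model
    endpointing: 300,       // ms; pass false to disable endpointing
    utteranceEndMs: 1000,   // requires interim results (on by default)
  });

  const llm = new OpenRouterLLM({
    apiKey: process.env.OPENROUTER_API_KEY ?? '',
    model: 'openai/gpt-4o',
    maxTokens: 512,
    temperature: 0.7,
    providerRouting: { sort: 'latency', allowFallbacks: true },
  });

  const tts = new CartesiaTTS({
    apiKey: process.env.CARTESIA_API_KEY ?? '',
    voiceId: '<your-cartesia-voice-id>', // placeholder
    sampleRate: 48000,                   // pipeline default per the source comments
  });

  // Optional warm-ups: prime the LLM HTTP connection and the TTS WebSocket
  // before the first real turn.
  await llm.warmup('You are a concise voice assistant.');
  await tts.warmup();

  return { stt, llm, tts };
}
```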
@@ -0,0 +1,8 @@
+ import {
+ RoomMemory
+ } from "./chunk-RQKGHAFV.mjs";
+ import "./chunk-BN7PIFNJ.mjs";
+ export {
+ RoomMemory
+ };
+ //# sourceMappingURL=room-memory-VAREPHY6.mjs.map
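This chunk only re-exports RoomMemory. As a rough usage sketch (not part of this diff), the API declared in the type-declaration hunk further down could be driven as follows; the import path is an assumption, since the package's export map is not shown here.

```ts
// Sketch only: import path assumed; method signatures follow the RoomMemory
// declaration in the .d.ts hunk below.
import { RoomMemory } from '@dtelecom/agents-js';

const memory = new RoomMemory({
  dbPath: './data/memory.db', // documented default path
  room: 'demo-room',
  flushIntervalMs: 5000,      // default flush cadence
});

await memory.init();          // loads the local embedding model once
memory.startSession();
memory.addParticipant('alice');

// Queue a turn for batch embedding; non-blocking.
memory.storeTurn('alice', 'Remember that the launch is on Friday.', false);

// Returns a formatted string ready to inject into the LLM system prompt.
const context = await memory.searchRelevant('When is the launch?');
console.log(context);

await memory.close();         // flushes pending turns before closing
```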
@@ -0,0 +1 @@
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
@@ -0,0 +1,259 @@
+ import { AudioSource } from '@dtelecom/server-sdk-node';
+
+ /**
+ * Embedder — local text embedding via @huggingface/transformers.
+ *
+ * Uses Xenova/all-MiniLM-L6-v2 (384 dimensions, ~22MB model).
+ * Runs entirely in-process — no API calls, no cost.
+ */
+ declare class Embedder {
+ private pipeline;
+ private initPromise;
+ get dimensions(): number;
+ /** Load the embedding model. Call once at startup. */
+ init(): Promise<void>;
+ private loadModel;
+ /** Embed a single text. Returns Float32Array of length 384. */
+ embed(text: string): Promise<Float32Array>;
+ /** Cosine similarity between two normalized vectors. Returns value in [-1, 1]. */
+ static cosineSimilarity(a: Float32Array, b: Float32Array): number;
+ /** Embed multiple texts in one call (more efficient than calling embed() in a loop). */
+ embedBatch(texts: string[]): Promise<Float32Array[]>;
+ }
+
+ /**
+ * RoomMemory — high-level persistent memory for a room.
+ *
+ * Stores all conversation turns, provides semantic search,
+ * and generates session summaries on session end.
+ *
+ * Uses SQLite + sqlite-vec for storage and local embeddings
+ * via @huggingface/transformers. Everything runs in-process,
+ * no external services needed.
+ */
+
+ interface RoomMemoryConfig {
+ /** Path to SQLite database file */
+ dbPath: string;
+ /** Room name (scopes all data) */
+ room: string;
+ /** Flush pending turns every N ms (default: 5000) */
+ flushIntervalMs?: number;
+ }
+ declare class RoomMemory {
+ private readonly store;
+ private readonly embedder;
+ private readonly room;
+ private sessionId;
+ private participants;
+ private pendingTurns;
+ private flushTimer;
+ private readonly flushIntervalMs;
+ private flushing;
+ constructor(config: RoomMemoryConfig);
+ /** Get the embedder instance (for reuse in other components). */
+ getEmbedder(): Embedder;
+ /** Initialize embedder (loads model). Call once at startup. */
+ init(): Promise<void>;
+ /** Start a new session for this room. */
+ startSession(): string;
+ /** Track a participant joining. */
+ addParticipant(identity: string): void;
+ /**
+ * Store a turn to memory. Non-blocking — queues for batch embedding.
+ * Call this for EVERY final transcription, even if agent doesn't respond.
+ */
+ storeTurn(speaker: string, text: string, isAgent: boolean): void;
+ /** Flush pending turns: embed and insert into database. */
+ private flushPending;
+ /**
+ * Search memory for context relevant to a query.
+ * Returns formatted string ready to inject into LLM system prompt.
+ */
+ searchRelevant(query: string, turnLimit?: number, sessionLimit?: number): Promise<string>;
+ /**
+ * End the current session. Generates an LLM summary and stores it.
+ */
+ endSession(llm: LLMPlugin): Promise<void>;
+ /** Close the memory store. Flush pending turns first. */
+ close(): Promise<void>;
+ }
+
+ declare class AudioOutput {
+ private source;
+ private _playing;
+ private _responding;
+ private _stopped;
+ private silenceInterval;
+ /** When set, raw PCM from TTS is saved to this directory as WAV files for debugging. */
+ dumpDir: string | null;
+ private dumpCounter;
+ constructor(source: AudioSource);
+ get playing(): boolean;
+ /**
+ * Mark the start of a multi-sentence response.
+ * Suppresses silence injection between sentences so partial frames
+ * in AudioSource's buffer don't get corrupted by interleaved silence.
+ */
+ beginResponse(): void;
+ /** Mark the end of a response — re-enable silence keepalive. */
+ endResponse(): void;
+ /**
+ * Start sparse silence keepalive to prevent the SFU from dropping the track.
+ * With Opus DTX enabled, the encoder handles silence natively — we only need
+ * an occasional packet to keep the SSRC alive.
+ */
+ startSilence(): void;
+ /**
+ * Write a PCM16 buffer to the audio output.
+ * The buffer is split into 20ms frames and fed to AudioSource.
+ */
+ writeBuffer(pcm16: Buffer): Promise<void>;
+ /**
+ * Write a stream of PCM16 buffers (from TTS) to the audio output.
+ * Supports cancellation via AbortSignal.
+ */
+ writeStream(stream: AsyncIterable<Buffer>, signal?: AbortSignal): Promise<void>;
+ /**
+ * Split a PCM16 buffer into 20ms frames and write them at real-time pace.
+ * Partial frames at the end are sent directly — AudioSource handles
+ * accumulation in its internal buffer.
+ */
+ private writeFrames;
+ /**
+ * Write silence frames for the given duration.
+ * Used to pad the end of a response so the last Opus frame is fully flushed
+ * and the audio doesn't cut off abruptly.
+ */
+ writeSilence(durationMs: number): Promise<void>;
+ /** Flush any buffered audio in AudioSource */
+ flush(): void;
+ /** Stop the silence keepalive */
+ stop(): void;
+ }
+
+ interface TranscriptionResult {
+ text: string;
+ isFinal: boolean;
+ confidence?: number;
+ language?: string;
+ /** Time in ms from last interim result to this final result (STT + end-of-turn). Only on isFinal. */
+ sttDuration?: number;
+ }
+ interface STTStreamOptions {
+ language?: string;
+ }
+ interface STTStream {
+ sendAudio(pcm16: Buffer): void;
+ on(event: 'transcription', cb: (result: TranscriptionResult) => void): this;
+ on(event: 'error', cb: (error: Error) => void): this;
+ close(): Promise<void>;
+ }
+ interface STTPlugin {
+ createStream(options?: STTStreamOptions): STTStream;
+ }
+ interface Message {
+ role: 'system' | 'user' | 'assistant';
+ content: string;
+ }
+ interface LLMChunk {
+ type: 'token' | 'tool_call' | 'done';
+ token?: string;
+ toolCall?: {
+ name: string;
+ arguments: string;
+ };
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ }
+ interface LLMPlugin {
+ chat(messages: Message[], signal?: AbortSignal): AsyncGenerator<LLMChunk>;
+ /** Optional: warm up the LLM connection with the system prompt */
+ warmup?(systemPrompt: string): Promise<void>;
+ }
+ interface TTSPlugin {
+ synthesize(text: string, signal?: AbortSignal): AsyncGenerator<Buffer>;
+ /** Optional: pre-connect to TTS server */
+ warmup?(): Promise<void>;
+ }
+ interface MemoryConfig {
+ /** Enable persistent room memory (requires better-sqlite3, sqlite-vec, @huggingface/transformers) */
+ enabled: boolean;
+ /** Path to SQLite database file (default: './data/memory.db') */
+ dbPath?: string;
+ }
+ type RespondMode = 'always' | 'addressed';
+ interface AgentConfig {
+ stt: STTPlugin;
+ tts?: TTSPlugin;
+ llm: LLMPlugin;
+ instructions: string;
+ /** When to respond: 'always' (1:1 mode) or 'addressed' (multi-participant). Default: 'always' */
+ respondMode?: RespondMode;
+ /** Agent name for addressing detection (default: 'assistant') */
+ agentName?: string;
+ /** Additional name variants to respond to (e.g. ['bot', 'ai']) */
+ nameVariants?: string[];
+ /** Called when a data channel message is received */
+ onDataMessage?: DataMessageHandler;
+ /** Persistent memory across sessions (stores turns, enables semantic search) */
+ memory?: MemoryConfig;
+ }
+ interface AgentStartOptions {
+ room: string;
+ apiKey: string;
+ apiSecret: string;
+ identity?: string;
+ name?: string;
+ }
+ type DataMessageHandler = (payload: Uint8Array, participantIdentity: string, topic?: string) => void;
+ interface PipelineOptions {
+ stt: STTPlugin;
+ llm: LLMPlugin;
+ tts?: TTSPlugin;
+ instructions: string;
+ audioOutput: AudioOutput;
+ /** Silence timeout for turn detection (default: 800ms) */
+ silenceTimeoutMs?: number;
+ /** When to respond: 'always' or 'addressed' (default: 'always') */
+ respondMode?: RespondMode;
+ /** Agent name for addressing detection */
+ agentName?: string;
+ /** Additional name variants */
+ nameVariants?: string[];
+ /**
+ * Hook called before responding to a turn. Return false to skip responding.
+ * Use for custom response logic (e.g., keyword filtering, rate limiting).
+ */
+ beforeRespond?: (speaker: string, text: string) => boolean | Promise<boolean>;
+ /** Room memory instance (injected by VoiceAgent if memory is enabled) */
+ memory?: RoomMemory;
+ }
+ type AgentState = 'idle' | 'listening' | 'thinking' | 'speaking';
+ interface AgentEvents {
+ transcription: (result: TranscriptionResult & {
+ speaker: string;
+ }) => void;
+ /** Emitted after each sentence finishes playing via TTS. */
+ sentence: (text: string) => void;
+ /** Emitted after the full response finishes playing. */
+ response: (text: string) => void;
+ /** Agent state: idle → listening (STT active) → thinking (LLM) → speaking (audio) → idle. */
+ agentState: (state: AgentState) => void;
+ error: (error: Error) => void;
+ connected: () => void;
+ disconnected: (reason?: string) => void;
+ }
+ interface PipelineEvents {
+ transcription: (result: TranscriptionResult & {
+ speaker: string;
+ }) => void;
+ sentence: (text: string) => void;
+ response: (text: string) => void;
+ agentState: (state: AgentState) => void;
+ error: (error: Error) => void;
+ }
+
+ export { type AgentConfig as A, type DataMessageHandler as D, Embedder as E, type LLMPlugin as L, type Message as M, type PipelineOptions as P, type RespondMode as R, type STTStream as S, type TranscriptionResult as T, type AgentStartOptions as a, type AgentState as b, type AgentEvents as c, AudioOutput as d, type LLMChunk as e, type MemoryConfig as f, type PipelineEvents as g, RoomMemory as h, type RoomMemoryConfig as i, type STTPlugin as j, type STTStreamOptions as k, type TTSPlugin as l };
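The plugin interfaces above are small enough to implement directly. Below is a minimal sketch of a custom LLMPlugin, a stub that streams the last message back word by word, which can be handy for wiring tests without hitting a real provider. The class name is invented here and the root import specifier is an assumption.

```ts
// Sketch only: conforms to the LLMPlugin / LLMChunk / Message declarations above.
// The import path assumes these types are re-exported from the package root.
import type { LLMPlugin, LLMChunk, Message } from '@dtelecom/agents-js';

class EchoLLM implements LLMPlugin {
  async *chat(messages: Message[], signal?: AbortSignal): AsyncGenerator<LLMChunk> {
    const last = messages[messages.length - 1];
    // Stream the last message back one word at a time, like a real provider would.
    for (const word of (last?.content ?? '').split(/\s+/)) {
      if (signal?.aborted) break;
      yield { type: 'token', token: `${word} ` };
    }
    yield { type: 'done' };
  }

  // warmup is optional in the interface; a no-op satisfies it.
  async warmup(_systemPrompt: string): Promise<void> {}
}
```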