titan-agent 5.5.13 → 5.5.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -14,7 +14,7 @@ const CLOUD_MODEL_CTX = {
14
14
  "glm-5.1:cloud": 198656,
15
15
  // GLM-5 — 128K context
16
16
  "glm-5:cloud": 131072,
17
- // Kimi K2.5 — 256K context (native multimodal agentic, agent swarm)
17
+ // Kimi K2.5 — 256K context (native multimodal agentic)
18
18
  "kimi-k2.5:cloud": 262144,
19
19
  // Kimi K2.6 — 256K context (next-gen agentic, enhanced reasoning)
20
20
  "kimi-k2.6:cloud": 262144,
@@ -107,7 +107,7 @@ const MODEL_CAPABILITIES = {
107
107
  // ── Nemotron — Super is 256K MoE optimized for collaborative agents ──
108
108
  "nemotron-3-super": { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: true, toolTemperature: 0.4 },
109
109
  "nemotron-3-nano": { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },
110
- // ── Kimi K2.5 — 256K, native agentic, agent swarm decomposition ──
110
+ // ── Kimi K2.5 / K2.6 — 256K, native agentic ──
111
111
  "kimi-k2.5": { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },
112
112
  "kimi-k2.6": { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },
113
113
  // ── Devstral — code-focused ──
@@ -1 +1 @@
1
- {"version":3,"sources":["../../src/providers/ollama.ts"],"sourcesContent":["/**\n * TITAN — Ollama Provider (Local LLMs)\n */\nimport {\n LLMProvider,\n type ChatOptions,\n type ChatResponse,\n type ChatStreamChunk,\n type ToolCall,\n} from './base.js';\nimport { loadConfig } from '../config/config.js';\nimport logger from '../utils/logger.js';\nimport { fetchWithRetry } from '../utils/helpers.js';\nimport { v4 as uuid } from 'uuid';\nimport * as fs from 'fs';\nimport { clampMaxTokens } from './modelCapabilities.js';\n\nconst COMPONENT = 'Ollama';\n\n/**\n * Per-model context window map for Ollama cloud models.\n * Auto-configures num_ctx to each model's actual maximum to prevent truncation.\n * Sources: Ollama Cloud model cards, March 2026.\n */\nconst CLOUD_MODEL_CTX: Record<string, number> = {\n // GLM-5.1 — 198K context (newest agentic flagship, SOTA SWE-Bench Pro)\n 'glm-5.1:cloud': 198656,\n // GLM-5 — 128K context\n 'glm-5:cloud': 131072,\n // Kimi K2.5 — 256K context (native multimodal agentic, agent swarm)\n 'kimi-k2.5:cloud': 262144,\n // Kimi K2.6 — 256K context (next-gen agentic, enhanced reasoning)\n 'kimi-k2.6:cloud': 262144,\n // Qwen3 Coder Next — 262K context (massive)\n 'qwen3-coder-next:cloud': 262144,\n // Qwen3.5 397B Cloud — 256K context (all variants support 256K)\n 'qwen3.5:397b-cloud': 262144,\n // DeepSeek V3.1 — 128K context\n 'deepseek-v3.1:671b-cloud': 131072,\n // DeepSeek V3.2 — 160K context (DSA long-context optimized)\n 'deepseek-v3.2:671b-cloud': 163840,\n 'deepseek-v3.2:cloud': 163840,\n // Devstral 2 — 128K context\n 'devstral-2:cloud': 131072,\n // Devstral Small 2 (local) — 32K\n 'devstral-small-2': 32768,\n 'devstral-small-2:latest': 32768,\n // Nemotron 3 Nano — 1M native, 32K practical for local\n 'nemotron-3-nano': 32768,\n 'nemotron-3-nano:latest': 32768,\n 'nemotron-3-nano:4b': 32768,\n 'nemotron-3-nano:30b': 32768,\n // Nemotron 3 Super — 256K context (MoE 120B/12B active)\n 'nemotron-3-super:cloud': 262144,\n // 
Gemini 3 Flash — 1M context\n 'gemini-3-flash-preview:latest': 1048576,\n // GPT OSS — 128K\n 'gpt-oss:120b-cloud': 131072,\n // MiniMax M2.7 — 200K context (Agent Teams, dynamic tool search)\n 'minimax-m2.7:cloud': 204800,\n // Gemma 4 — 256K context (native function calling)\n 'gemma4:cloud': 262144,\n // Qwen3.5 35B local — 32K\n 'qwen3.5:35b': 32768,\n};\n\n/**\n * Model capability profiles — controls how TITAN adapts to each model's strengths.\n * Instead of blanket rules for all models, each model gets tuned behavior.\n *\n * selfSelectsTools: Model picks tools well on its own — don't force tool_choice='required'\n * thinkingWithTools: Model benefits from thinking (<think> tags) during tool calling\n * needsSystemMerge: Model ignores standalone system messages — merge into first user msg\n * toolTemperature: Optimal temperature for tool-calling tasks (null = use caller's value or 0.5 default)\n * toolTopP: Optimal top_p for tool calling (null = omit)\n * toolTopK: Optimal top_k for tool calling (null = omit)\n */\ninterface ModelCapabilities {\n selfSelectsTools: boolean;\n thinkingWithTools: boolean;\n needsSystemMerge: boolean;\n toolTemperature: number | null;\n toolTopP: number | null;\n toolTopK: number | null;\n}\n\nconst DEFAULT_CAPABILITIES: ModelCapabilities = {\n selfSelectsTools: false,\n thinkingWithTools: false,\n needsSystemMerge: true, // Conservative default: merge for unknown models\n toolTemperature: 0.5,\n toolTopP: null,\n toolTopK: null,\n};\n\n/** Heuristic: infer capabilities from model name patterns when no hardcoded\n * entry exists. Most modern models (2024+) support native tool calling and\n * handle system prompts correctly. This prevents unknown models from being\n * crippled by overly conservative defaults. 
*/\nfunction inferCapabilitiesFromName(modelName: string): Partial<ModelCapabilities> | undefined {\n const lower = modelName.toLowerCase();\n\n // Cloud-hosted models are almost always modern and capable\n if (lower.includes(':cloud') || lower.includes('-cloud')) {\n return { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 };\n }\n\n // Large local models (30B+) are typically capable\n const sizeMatch = lower.match(/(\\d+)b/);\n if (sizeMatch) {\n const size = parseInt(sizeMatch[1], 10);\n if (size >= 30) {\n return { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.5 };\n }\n }\n\n // Known-capable families by name pattern (even if not in hardcoded map)\n const capableFamilies = ['qwen', 'glm', 'deepseek', 'kimi', 'gemma', 'nemotron', 'devstral', 'gemini', 'mistral-large', 'llama3.3', 'llama4', 'phi4', 'command-r-plus'];\n for (const family of capableFamilies) {\n if (lower.includes(family)) {\n return { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.5 };\n }\n }\n\n // Truly unknown small local models — stay conservative\n return undefined;\n}\n\nconst MODEL_CAPABILITIES: Record<string, Partial<ModelCapabilities>> = {\n // ── Qwen family — excellent tool calling, uses thinking ──\n 'qwen3.5': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.7 },\n 'qwen3': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.7 },\n 'qwen3-coder-next': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.5 },\n\n // ── DeepSeek family — strong reasoning, good tool use ──\n 'deepseek-v3': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 'deepseek-v3.1': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 
'deepseek-v3.2': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n\n // ── MiniMax M2.7 — XML tool format, needs special handling ──\n // Hunt Finding #05 (2026-04-14): flipped selfSelectsTools from true→false.\n // Confirmed by reproducing: a \"use shell to run uptime\" prompt returned\n // fabricated uptime text with no tool call. The model hallucinates instead\n // of calling tools when given the choice. Setting false forces the agent\n // loop's forceToolUse to fire `tool_choice: required`, which prevents this\n // class of hallucination at the API level.\n 'minimax-m2.7': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.8, toolTopP: 0.95, toolTopK: 40 },\n 'minimax-m2': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.8, toolTopP: 0.95 },\n\n // ── Gemma family — good tool use, no thinking ──\n 'gemma4': { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 1.0, toolTopP: 0.95, toolTopK: 64 },\n 'gemma-3': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.4 },\n\n // ── GLM family — GLM-5.1 is agentic flagship, SOTA SWE-Bench Pro, 198K ctx ──\n 'glm-5.1': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 'glm-5': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.4 },\n 'glm-4.7': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.4 },\n\n // ── Nemotron — Super is 256K MoE optimized for collaborative agents ──\n 'nemotron-3-super': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: true, toolTemperature: 0.4 },\n 'nemotron-3-nano': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n\n // ── Kimi K2.5 — 256K, native agentic, 
agent swarm decomposition ──\n 'kimi-k2.5': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 'kimi-k2.6': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n\n // ── Devstral — code-focused ──\n 'devstral-2': { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.4 },\n 'devstral-small-2': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n\n // ── Gemini — handles system messages well ──\n 'gemini-3-flash': { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.5 },\n\n // ── Llama/Mistral — weaker tool calling ──\n 'llama3.1': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n 'llama3.2': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n 'mistral': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n};\n\n/** Resolve capabilities for a model.\n *\n * Lookup order:\n * 1. Empirical probe result from capabilities registry (~/.titan/model-capabilities.json)\n * — This reflects ACTUAL behavior tested against the live model\n * 2. Hardcoded MODEL_CAPABILITIES map (this file) — matched by longest prefix\n * 3. 
DEFAULT_CAPABILITIES — conservative fallback for unknown models\n */\nfunction getModelCapabilities(modelName: string): ModelCapabilities {\n // Step 1: Check empirical probe registry (preferred)\n try {\n // eslint-disable-next-line @typescript-eslint/no-require-imports\n const { getProbeResult, isProbeStale } = require('../agent/capabilitiesRegistry.js') as typeof import('../agent/capabilitiesRegistry.js');\n const probe = getProbeResult(modelName) || getProbeResult(`ollama/${modelName}`);\n if (probe && !isProbeStale(probe)) {\n // Convert probe result to capability flags\n return {\n ...DEFAULT_CAPABILITIES,\n selfSelectsTools: probe.nativeToolCalls,\n thinkingWithTools: probe.hasThinkingMode && !probe.needsExplicitThinkFalse,\n needsSystemMerge: !probe.respectsSystemPrompt,\n toolTemperature: probe.nativeToolCalls ? 0.5 : 0.3,\n toolTopP: null,\n toolTopK: null,\n };\n }\n } catch {\n // Registry not available (e.g., during tests) — fall through\n }\n\n/** Track which unknown models we've already triggered background probes for */\nconst probeInFlight = new Set<string>();\n\n/** Trigger a background capability probe for an unknown model.\n * Fire-and-forget: the next request will pick up the result from the registry. 
*/\nfunction triggerBackgroundProbe(modelName: string): void {\n if (probeInFlight.has(modelName)) return;\n probeInFlight.add(modelName);\n // Dynamic import to avoid circular deps at module load time\n import('../agent/modelProbe.js')\n .then(({ probeModel }) => probeModel(`ollama/${modelName}`))\n .then((result) => import('../agent/capabilitiesRegistry.js')\n .then(({ recordProbeResult }) => {\n recordProbeResult(result);\n logger.info(COMPONENT, `Background probe complete for ${modelName}: nativeTools=${result.nativeToolCalls}, respectsSystem=${result.respectsSystemPrompt}`);\n }))\n .catch((err) => logger.warn(COMPONENT, `Background probe failed for ${modelName}: ${(err as Error).message}`))\n .finally(() => probeInFlight.delete(modelName));\n}\n\n // Step 2: Hardcoded map (prefix-matched, longest wins)\n const bare = modelName.includes('/') ? modelName.split('/').slice(1).join('/') : modelName;\n // Strip version/tag suffix (e.g. \":cloud\", \":4b\", \":latest\", \":4b-cloud\")\n const noTag = bare.replace(/:(cloud|latest|\\d+b(-cloud)?)$/i, '');\n // Strip known custom name prefixes that operators add (e.g. \"titan-qwen3.5\" → \"qwen3.5\",\n // \"myorg-deepseek-v3\" → \"deepseek-v3\"). 
We do a single pass removing any word-prefix\n // that is NOT itself a known model family, up to the first recognised pattern in the map.\n const knownPrefixes = /^(titan|local|custom|myorg|org|priv|private|dev|test|prod|staging)-/i;\n const baseName = noTag.replace(knownPrefixes, '');\n\n let bestMatch: Partial<ModelCapabilities> | undefined;\n let bestLen = 0;\n for (const [pattern, caps] of Object.entries(MODEL_CAPABILITIES)) {\n if (baseName === pattern || baseName.startsWith(pattern)) {\n if (pattern.length > bestLen) {\n bestMatch = caps;\n bestLen = pattern.length;\n }\n }\n }\n\n if (!bestMatch) {\n // Try heuristic inference from model name before falling back to defaults\n const inferred = inferCapabilitiesFromName(modelName);\n if (inferred) {\n logger.info(COMPONENT, `Model \"${modelName}\" not in hardcoded map — using inferred capabilities: ${JSON.stringify(inferred)}`);\n bestMatch = inferred;\n } else {\n logger.info(COMPONENT, `Model \"${modelName}\" not in capabilities database or registry — using conservative defaults. Triggering background probe...`);\n triggerBackgroundProbe(modelName);\n }\n }\n return { ...DEFAULT_CAPABILITIES, ...(bestMatch || {}) };\n}\n\n/** Get the optimal num_ctx for a given model name */\nfunction getModelCtx(modelName: string): number {\n const bare = modelName.includes('/') ? 
modelName.split('/').slice(1).join('/') : modelName;\n if (CLOUD_MODEL_CTX[bare]) return CLOUD_MODEL_CTX[bare];\n\n // Heuristic: modern cloud models typically have 128K+ context\n if (bare.endsWith(':cloud') || bare.endsWith('-cloud')) return 131072;\n\n // Heuristic: large local models (30B+) often support 32K-64K\n const sizeMatch = bare.match(/(\\d+)b/i);\n if (sizeMatch) {\n const size = parseInt(sizeMatch[1], 10);\n if (size >= 70) return 65536;\n if (size >= 30) return 32768;\n if (size >= 14) return 16384;\n }\n\n // Conservative fallback for tiny unknown local models\n return 8192;\n}\n\n/** Max system prompt length for cloud models with tool calling.\n * Cloud models have 128K+ context — keep this high enough to always include\n * the full descriptions of any tools actively being used in the current task.\n */\nconst CLOUD_MAX_SYSTEM_PROMPT = 8000;\n\n/** Compress a system prompt for cloud models with tool calling.\n * Preserves (in priority order):\n * 1. Tool Execution rules (ReAct loop, MUST/NEVER — highest priority)\n * 2. Active tool descriptions (tools currently in use — must not be stripped)\n * 3. Identity\n * 4. Brief capabilities + behavior reminder\n *\n * @param content The full system prompt to compress\n * @param activeTools Descriptions of tools actively in use — always preserved\n */\nfunction compressSystemPrompt(content: string, activeTools?: Array<{ name: string; description: string }>): string {\n if (content.length <= CLOUD_MAX_SYSTEM_PROMPT) return content;\n\n const sections: string[] = [];\n\n // 1. Tool Execution rules — always first, always preserved\n const toolExecMatch = content.match(/## Tool Execution — HIGHEST PRIORITY[\\s\\S]*?(?=\\n## CRITICAL)/);\n if (toolExecMatch) {\n sections.push(toolExecMatch[0].trim());\n } else {\n sections.push(`## Tool Execution — HIGHEST PRIORITY\nYou are an AI agent. 
Your PRIMARY function is to execute tasks using tools.\n\nReAct Loop: THINK → ACT (call tool) → OBSERVE (read result) → REPEAT until done.\n\nMUST: call web_search+web_fetch for factual questions, call write_file/edit_file to save files (NEVER output file content as text), call shell for commands, call tool_search if unsure which tool to use.\nNEVER: describe what you could do, output file content inline, generate current facts from memory, tell user to visit a URL.\n\nRight: asked to write a file → call write_file immediately.\nWrong: asked to write a file → output the content as text in your reply.`);\n }\n\n // 2. Identity (shortened)\n const identityMatch = content.match(/## CRITICAL: Your Identity[\\s\\S]*?(?=\\n## )/);\n if (identityMatch) sections.push(identityMatch[0].trim());\n\n // 3. Brief capabilities + behavior\n sections.push('## Tools Available\\nShell, file read/write/edit, web search/fetch, browser, memory, weather, code execution, gmail, gdrive, gcal_personal, gtasks, gcontacts. Use tool_search to discover any tool not listed here.');\n sections.push('## Behavior\\n- Lead with action — call tools immediately, explain briefly after\\n- Never re-plan mid-task after CONFIRM — execute directly\\n- Confirm before destructive operations');\n\n // 4. Active tool descriptions — only inject if budget allows (max 2000 chars for tools).\n // This prevents the model from forgetting available actions mid-task (e.g. after CONFIRM).\n if (activeTools && activeTools.length > 0) {\n const TOOL_BUDGET = 2000;\n const toolLines: string[] = [];\n let toolChars = 0;\n for (const t of activeTools) {\n // Use first 150 chars of description to keep it compact\n const desc = t.description.length > 150 ? t.description.slice(0, 147) + '...' 
: t.description;\n const line = `- **${t.name}**: ${desc}`;\n if (toolChars + line.length > TOOL_BUDGET) break;\n toolLines.push(line);\n toolChars += line.length;\n }\n if (toolLines.length > 0) {\n sections.push(`## Active Tools\\n${toolLines.join('\\n')}`);\n }\n }\n\n const compressed = sections.join('\\n\\n');\n // Safety: never return something larger than the original\n if (compressed.length >= content.length) {\n logger.info(COMPONENT, `Compressed prompt would be larger (${compressed.length} vs ${content.length}), using truncated original`);\n return content.slice(0, CLOUD_MAX_SYSTEM_PROMPT);\n }\n logger.info(COMPONENT, `Compressed system prompt for cloud model: ${content.length} → ${compressed.length} chars`);\n return compressed;\n}\n\n/**\n * Trim messages for cloud models while preserving tool call/response pairs.\n * Naive slicing can split a tool call from its response, breaking the tool calling contract.\n * This walks backwards keeping assistant+tool pairs together.\n */\nfunction trimPreservingToolPairs(msgs: Array<Record<string, unknown>>, maxTotal: number): Array<Record<string, unknown>> {\n const systemMsgs = msgs.filter(m => m.role === 'system');\n const nonSystem = msgs.filter(m => m.role !== 'system');\n const maxNonSystem = maxTotal - systemMsgs.length;\n\n if (nonSystem.length <= maxNonSystem) return msgs;\n\n // Walk backwards, keeping tool/assistant pairs together\n const kept: Array<Record<string, unknown>> = [];\n let i = nonSystem.length - 1;\n while (i >= 0 && kept.length < maxNonSystem) {\n const msg = nonSystem[i];\n if (msg.role === 'tool') {\n // Keep this tool result and find its assistant parent\n kept.unshift(msg);\n for (let j = i - 1; j >= 0; j--) {\n if (nonSystem[j].role === 'assistant' && (nonSystem[j].tool_calls || nonSystem[j].toolCalls)) {\n kept.unshift(nonSystem[j]);\n i = j - 1;\n break;\n }\n if (nonSystem[j].role === 'tool') {\n // Sibling tool result from same batch\n kept.unshift(nonSystem[j]);\n } else {\n i = 
j;\n break;\n }\n }\n } else {\n kept.unshift(msg);\n i--;\n }\n }\n\n return [...systemMsgs, ...kept];\n}\n\n/** Simplify tool parameter schemas for cloud models.\n * Strips Zod artifacts ($schema, additionalProperties, etc.) that can\n * confuse cloud model tool-calling.\n */\nfunction simplifySchema(schema: Record<string, unknown> | undefined): Record<string, unknown> {\n if (!schema) return { type: 'object', properties: {} };\n const clean: Record<string, unknown> = { type: schema.type || 'object' };\n if (schema.properties) {\n const props: Record<string, unknown> = {};\n for (const [key, val] of Object.entries(schema.properties as Record<string, Record<string, unknown>>)) {\n // Flatten each property to just type + description\n const prop: Record<string, unknown> = { type: val.type || 'string' };\n if (val.description) prop.description = val.description;\n if (val.enum) prop.enum = val.enum;\n if (val.default !== undefined) prop.default = val.default;\n props[key] = prop;\n }\n clean.properties = props;\n }\n if (schema.required) clean.required = schema.required;\n return clean;\n}\n\nexport class OllamaProvider extends LLMProvider {\n readonly name = 'ollama';\n readonly displayName = 'Ollama (Local)';\n\n private get baseUrl(): string {\n const config = loadConfig();\n return config.providers.ollama.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';\n }\n\n async chat(options: ChatOptions): Promise<ChatResponse> {\n const model = (options.model || 'llama3.1').replace('ollama/', '');\n logger.debug(COMPONENT, `Chat request: model=${model}, messages=${options.messages.length}`);\n\n // Cloud models (Ollama Pro): detect via -cloud suffix or :cloud tag\n const isCloudModel = model.includes('-cloud') || model.includes(':cloud');\n const hasTools = options.tools && options.tools.length > 0;\n const hasToolRoleMessages = options.messages.some(m => m.role === 'tool');\n\n const body: Record<string, unknown> = {\n model,\n messages: 
options.messages.map((m) => {\n const msg: Record<string, unknown> = { role: m.role };\n // Compress system prompts for cloud models with tools to improve tool-calling compliance.\n // Pass descriptions of complex tools (>200 chars) so compression always preserves them —\n // prevents the model from forgetting available actions mid-task (e.g. after CONFIRM).\n if (m.role === 'system' && isCloudModel && hasTools) {\n const activeToolDescs = (options.tools ?? [])\n .filter(t => (t.function.description?.length ?? 0) > 200)\n .map(t => ({ name: t.function.name, description: t.function.description ?? '' }));\n msg.content = compressSystemPrompt(m.content, activeToolDescs.length > 0 ? activeToolDescs : undefined);\n } else {\n msg.content = m.content;\n }\n if (m.toolCalls && m.toolCalls.length > 0) {\n msg.tool_calls = m.toolCalls.map(tc => {\n let parsedArgs: Record<string, unknown> = {};\n try {\n parsedArgs = JSON.parse(tc.function.arguments || '{}');\n } catch {\n logger.warn(COMPONENT, `Malformed tool arguments for ${tc.function.name}, using empty args`);\n }\n // v4.13: Gemini's Ollama-compat adapter rejects\n // function_call.name === ''. Some models emit empty\n // names for tool_calls when the call is malformed;\n // stamp a placeholder so the whole turn isn't\n // rejected with HTTP 400 \"Name cannot be empty\".\n const fnName = (tc.function.name || '').trim() || 'unknown_tool';\n const out: Record<string, unknown> = {\n id: tc.id,\n type: tc.type || 'function',\n function: {\n name: fnName,\n arguments: parsedArgs,\n },\n };\n // v4.13: relay Gemini thought_signature through the\n // round-trip. 
Ollama's Gemini proxy needs it on every\n // subsequent functionCall part or rejects with\n // \"Function call is missing a thought_signature\".\n if (tc.thoughtSignature) {\n (out.function as Record<string, unknown>).thought_signature = tc.thoughtSignature;\n out.thought_signature = tc.thoughtSignature;\n }\n return out;\n });\n }\n if (m.toolCallId) msg.tool_call_id = m.toolCallId;\n // Cloud models (Gemini API) require function_response.name to be non-empty.\n // Guarantee a non-empty name on every tool-role message.\n if (m.role === 'tool') {\n const toolName = (m.name || '').trim() || 'tool';\n msg.name = toolName;\n } else if (m.name) {\n msg.name = m.name;\n }\n return msg;\n }),\n stream: false,\n keep_alive: '30m',\n options: {\n // Auto-configure context window per model's known maximum.\n // getModelCtx() returns the correct num_ctx for each cloud/local model.\n // v4.10.0-local (cost cap): capped cloud num_predict to 8K\n // (was 32K). OpenRouter's paid models reject requests whose\n // max_tokens exceeds the remaining credit, even though most\n // responses don't come close to that. 8K is plenty for any\n // single turn and keeps us from getting HTTP 402s when\n // credit runs low.\n num_predict: clampMaxTokens(options.model || 'ollama/llama3.1', options.maxTokens),\n num_ctx: getModelCtx(model),\n temperature: options.temperature ?? 
0.7,\n },\n };\n\n // Model capabilities — adapts behavior per model family\n const caps = getModelCapabilities(model);\n\n // Thinking mode: explicitly control per model capabilities.\n // Models that don't benefit from thinking (thinkingWithTools=false) get it disabled\n // to prevent content being routed to the thinking field instead of content field.\n // This is critical for models like minimax-m2.7:cloud which put ALL output in\n // the thinking field when think is unset, leaving content empty.\n if (options.thinking === false) {\n body.think = false;\n } else if (!caps.thinkingWithTools) {\n // Model doesn't support thinking — disable it to prevent 400 errors\n // from Ollama (e.g. \"titan-qwen3.5:4b does not support thinking\").\n body.think = false;\n } else if (options.thinking === true) {\n body.think = true;\n }\n // Otherwise: omit body.think — let the model decide\n\n // Per-turn override: when the conversation contains tool-role messages,\n // force think=false regardless of caller intent. The GLM-family\n // tool-call parser on the server (vLLM #39611, confirmed by Z.ai docs\n // for GLM-5.1) silently drops tool results when enable_thinking=true,\n // breaking the multi-turn tool loop. Z.ai's own guidance: disable\n // thinking on tool-call turns. This keeps reasoning available for\n // planning turns while preventing the drop on execution turns.\n if (hasToolRoleMessages && body.think !== false) {\n const priorIntent = body.think === undefined ? 
'unset' : String(body.think);\n body.think = false;\n logger.info(COMPONENT, `[ToolTurnThinkOverride] Forcing think=false for ${model} (tool-role message present, caller intent=${priorIntent})`);\n }\n\n if (options.tools && options.tools.length > 0) {\n body.tools = options.tools.map((t) => ({\n type: 'function',\n function: {\n name: t.function.name,\n description: t.function.description,\n parameters: simplifySchema(t.function.parameters),\n },\n }));\n // Per-model optimal sampling for tool calling\n (body.options as Record<string, unknown>).temperature = options.temperature ?? caps.toolTemperature ?? 0.5;\n if (caps.toolTopP) (body.options as Record<string, unknown>).top_p = caps.toolTopP;\n if (caps.toolTopK) (body.options as Record<string, unknown>).top_k = caps.toolTopK;\n\n // Force a tool call on the first round when the task requires it\n // Models that self-select tools well don't need forcing — it hurts them\n if (options.forceToolUse && !caps.selfSelectsTools) {\n body.tool_choice = 'required';\n logger.info(COMPONENT, `[ToolChoiceRequired] Setting tool_choice=required for ${model} (forceToolUse=true, selfSelectsTools=false)`);\n } else if (options.forceToolUse && caps.selfSelectsTools) {\n logger.info(COMPONENT, `[ToolChoiceSkipped] forceToolUse=true but selfSelectsTools=true for ${model} — NOT setting tool_choice`);\n }\n }\n\n // Ollama-native structured outputs — constrain generation to a JSON schema.\n // https://docs.ollama.com/capabilities/structured-outputs.md\n if (options.format !== undefined) {\n body.format = options.format;\n }\n\n // Cloud models: trim conversation history preserving tool call/response pairs.\n // With 131K context window, cloud models can handle much longer histories.\n // E1: Use >= 80 with margin (trim to 75) to prevent off-by-one at exact boundary.\n if (isCloudModel && hasTools) {\n const msgs = body.messages as Array<Record<string, unknown>>;\n if (msgs.length >= 80) {\n const trimmed = 
trimPreservingToolPairs(msgs, 80);\n logger.info(COMPONENT, `Cloud model context trim: ${msgs.length} → ${trimmed.length} messages`);\n body.messages = trimmed;\n }\n }\n\n // Some models ignore standalone system messages during tool calling.\n // Only merge when the model's capability profile says it needs it.\n if (hasTools && caps.needsSystemMerge) {\n const msgs = body.messages as Array<Record<string, unknown>>;\n const sysIdx = msgs.findIndex(m => m.role === 'system');\n const firstUserIdx = msgs.findIndex(m => m.role === 'user');\n if (sysIdx >= 0 && firstUserIdx >= 0 && msgs[sysIdx].content) {\n const sysContent = msgs[sysIdx].content as string;\n msgs[firstUserIdx].content = `[System Instructions]\\n${sysContent}\\n\\n[User Message]\\n${msgs[firstUserIdx].content}`;\n msgs.splice(sysIdx, 1); // Remove the standalone system message\n logger.info(COMPONENT, `Merged system prompt into first user message for cloud model compatibility`);\n }\n }\n\n const sentMessages = body.messages as Array<{role: string; content: string}>;\n const toolNames = body.tools ? (body.tools as Array<{function: {name: string}}>).map(t => t.function.name) : [];\n logger.info(COMPONENT, `Chat request: model=${model}, cloud=${isCloudModel}, tools=[${toolNames.join(',')}], think=${body.think}, messages=${sentMessages.length}`);\n \n if (process.env.DUMP_OLLAMA_BODY === '1' || model.includes('gemini')) {\n logger.error(COMPONENT, `[DUMP_BODY] Dumping failing request body for ${model} to /tmp/ollama-body-dump.json`);\n try {\n fs.writeFileSync('/tmp/ollama-body-dump.json', JSON.stringify(body, null, 2));\n } catch (e) {\n logger.error(COMPONENT, `Failed to dump body: ${e}`);\n }\n }\n\n // Cloud models routed through Ollama need longer timeouts (they proxy to remote APIs)\n const timeoutMs = isCloudModel ? 
300_000 : 120_000; // 5min cloud, 2min local\n let response = await fetchWithRetry(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n }, { timeoutMs });\n\n if (!response.ok) {\n const errorText = await response.text();\n // Fallback: if model doesn't support native tool calling or tokenization\n // fails with tools, retry without tools. Covers Gemini proxy errors like\n // \"does not support tools\" and \"tokenization\" failures on malformed schemas.\n if (response.status === 400 && body.tools && (\n errorText.includes('does not support tools') ||\n errorText.includes('tokenization') ||\n errorText.includes('tokenize') ||\n errorText.includes('Invalid JSON')\n )) {\n logger.warn(COMPONENT, `Model ${model} does not support native tool calling — running in chat-only mode`);\n delete body.tools;\n response = await fetchWithRetry(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n }, { timeoutMs });\n if (!response.ok) {\n const retryText = await response.text();\n // Hunt Finding #37 (2026-04-14): use createProviderError to\n // attach status + parsed Retry-After so the router actually\n // respects the provider's backoff hint.\n const { createProviderError } = await import('./errorTaxonomy.js');\n throw createProviderError('Ollama', response, retryText, { provider: 'ollama', model });\n }\n } else {\n const { createProviderError } = await import('./errorTaxonomy.js');\n throw createProviderError('Ollama', response, errorText, { provider: 'ollama', model });\n }\n }\n\n // v4.13 ancestor-extraction (Hermes rate_limit_tracker): capture any\n // x-ratelimit-* headers the Ollama proxy exposes. Graceful no-op when\n // the headers aren't present. 
Provider name is 'ollama' so the router's\n // proactive-backoff logic can consult per-provider state.\n try {\n const { recordHeaders } = await import('./rateLimitTracker.js');\n recordHeaders('ollama', response.headers);\n } catch { /* never fail the chat on tracker issues */ }\n\n const data = await response.json() as Record<string, unknown>;\n const message = data.message as Record<string, unknown>;\n logger.info(COMPONENT, `Response from ${model}: tool_calls=${JSON.stringify(message.tool_calls)}, content_length=${((message.content as string) || '').length}`);\n const toolCalls: ToolCall[] = [];\n\n // v5.0.2: Only accept tool_calls from the model if tools were actually\n // sent in the request. Prevents hallucinated tool calls when the safety\n // system has stripped all tools (activeTools = []) or for models that\n // emit tool_calls even without tool definitions.\n if (message.tool_calls && options.tools && options.tools.length > 0) {\n for (const tc of message.tool_calls as Array<Record<string, unknown>>) {\n const fn = tc.function as Record<string, unknown>;\n // v4.13: capture Gemini thought_signature if present — needed\n // on the round-trip back or Gemini rejects the next request.\n const thoughtSig = (tc.thought_signature as string | undefined) ??\n (tc.thoughtSignature as string | undefined) ??\n (fn.thought_signature as string | undefined) ??\n (fn.thoughtSignature as string | undefined);\n toolCalls.push({\n id: uuid(),\n type: 'function',\n function: {\n name: fn.name as string,\n arguments: JSON.stringify(fn.arguments),\n },\n ...(thoughtSig ? 
{ thoughtSignature: thoughtSig } : {}),\n });\n }\n }\n\n // A2: Hallucinated tool name detection at provider level (LangGraph pattern)\n if (options.tools && toolCalls.length > 0) {\n const validNames = new Set(options.tools.map(t => t.function.name));\n const invalid = toolCalls.filter(tc => !validNames.has(tc.function.name));\n if (invalid.length > 0) {\n logger.warn(COMPONENT, `[HallucinationGuard] Model hallucinated ${invalid.length} tool name(s): ${invalid.map(tc => tc.function.name).join(', ')}. Will be caught by toolRunner with corrective feedback.`);\n }\n }\n\n // If content is empty but thinking field has content, use it as a fallback.\n // This handles models that route output to thinking field when think is\n // unset or misconfigured. The router's stripThinkingFromResponse() will\n // clean up any reasoning that leaks through, so we can be permissive here.\n let content = (message.content as string) || '';\n if (!content && message.thinking) {\n const thinking = (message.thinking as string) || '';\n if (thinking.length > 0) {\n logger.info(COMPONENT, `[ThinkingFallback] Content empty, using thinking field (${thinking.length} chars)`);\n content = thinking;\n }\n }\n // Strip leaked thinking tags from Qwen/DeepSeek models\n content = content.replace(/^[\\s\\S]*?<\\/think>\\s*/m, '').trim();\n\n return {\n id: uuid(),\n content,\n toolCalls: toolCalls.length > 0 ? toolCalls : undefined,\n usage: {\n promptTokens: (data.prompt_eval_count as number) || 0,\n completionTokens: (data.eval_count as number) || 0,\n totalTokens: ((data.prompt_eval_count as number) || 0) + ((data.eval_count as number) || 0),\n },\n finishReason: toolCalls.length > 0 ? 
'tool_calls' : 'stop',\n model: `ollama/${model}`,\n };\n }\n\n async *chatStream(options: ChatOptions): AsyncGenerator<ChatStreamChunk> {\n const model = (options.model || 'llama3.1').replace('ollama/', '');\n\n // Cloud models (Ollama Pro): detect via -cloud suffix or :cloud tag\n const isCloudModel = model.includes('-cloud') || model.includes(':cloud');\n const hasTools = options.tools && options.tools.length > 0;\n const hasToolRoleMessages = options.messages.some(m => m.role === 'tool');\n\n const body: Record<string, unknown> = {\n model,\n messages: options.messages.map((m) => {\n const msg: Record<string, unknown> = { role: m.role };\n // Compress system prompts for cloud models with tools — preserve active tool descriptions\n if (m.role === 'system' && isCloudModel && hasTools) {\n const activeToolDescs = (options.tools ?? [])\n .filter(t => (t.function.description?.length ?? 0) > 200)\n .map(t => ({ name: t.function.name, description: t.function.description ?? '' }));\n msg.content = compressSystemPrompt(m.content, activeToolDescs.length > 0 ? activeToolDescs : undefined);\n } else {\n msg.content = m.content;\n }\n if (m.toolCalls && m.toolCalls.length > 0) {\n msg.tool_calls = m.toolCalls.map(tc => ({\n id: tc.id,\n type: tc.type || 'function',\n function: { name: tc.function.name, arguments: JSON.parse(tc.function.arguments || '{}') }\n }));\n }\n if (m.toolCallId) msg.tool_call_id = m.toolCallId;\n // Cloud models (Gemini API) require function_response.name to be non-empty\n if (m.role === 'tool') {\n msg.name = m.name || 'tool';\n } else if (m.name) {\n msg.name = m.name;\n }\n return msg;\n }),\n stream: true,\n keep_alive: '30m',\n options: {\n // v4.10.0-local (cost cap): 8K cloud cap matches non-stream path\n num_predict: clampMaxTokens(options.model || 'ollama/llama3.1', options.maxTokens),\n num_ctx: getModelCtx(model),\n temperature: options.temperature ?? 
0.7,\n },\n };\n\n // Model capabilities — adapts behavior per model family\n const caps = getModelCapabilities(model);\n\n // Thinking mode: respect explicit setting, otherwise use model capabilities.\n // Disable for models that don't support thinking — prevents 400 errors.\n if (options.thinking === false) {\n body.think = false;\n } else if (!caps.thinkingWithTools) {\n body.think = false;\n } else if (options.thinking === true) {\n body.think = true;\n }\n\n // Per-turn override for tool-role turns (see chat() for rationale: vLLM #39611 / Z.ai docs).\n if (hasToolRoleMessages && body.think !== false) {\n const priorIntent = body.think === undefined ? 'unset' : String(body.think);\n body.think = false;\n logger.info(COMPONENT, `[ToolTurnThinkOverride] (stream) Forcing think=false for ${model} (tool-role message present, caller intent=${priorIntent})`);\n }\n\n if (hasTools) {\n body.tools = options.tools!.map((t) => ({\n type: 'function',\n function: {\n name: t.function.name,\n description: t.function.description,\n parameters: simplifySchema(t.function.parameters),\n },\n }));\n // Per-model optimal sampling for tool calling\n (body.options as Record<string, unknown>).temperature = options.temperature ?? caps.toolTemperature ?? 
0.5;\n if (caps.toolTopP) (body.options as Record<string, unknown>).top_p = caps.toolTopP;\n if (caps.toolTopK) (body.options as Record<string, unknown>).top_k = caps.toolTopK;\n\n // Force tool_choice when requested — skip for models that self-select well\n if (options.forceToolUse && !caps.selfSelectsTools) {\n body.tool_choice = 'required';\n }\n }\n\n // Ollama-native structured outputs (stream variant).\n if (options.format !== undefined) {\n body.format = options.format;\n }\n\n // Optimize: trim history preserving tool pairs (cloud models only — local models have smaller contexts)\n if (isCloudModel && hasTools) {\n const msgs = body.messages as Array<Record<string, unknown>>;\n if (msgs.length > 80) {\n const trimmed = trimPreservingToolPairs(msgs, 80);\n logger.info(COMPONENT, `[Stream] Cloud model context trim: ${msgs.length} → ${trimmed.length} messages`);\n body.messages = trimmed;\n }\n }\n // Merge system into first user message only for models that need it\n if (hasTools && caps.needsSystemMerge) {\n const msgs2 = body.messages as Array<Record<string, unknown>>;\n const sysIdx = msgs2.findIndex(m => m.role === 'system');\n const firstUserIdx = msgs2.findIndex(m => m.role === 'user');\n if (sysIdx >= 0 && firstUserIdx >= 0 && msgs2[sysIdx].content) {\n const sysContent = msgs2[sysIdx].content as string;\n msgs2[firstUserIdx].content = `[System Instructions]\\n${sysContent}\\n\\n[User Message]\\n${msgs2[firstUserIdx].content}`;\n msgs2.splice(sysIdx, 1);\n }\n }\n\n try {\n // Cloud models need longer timeouts for streaming too\n const streamTimeoutMs = isCloudModel ? 
300_000 : 120_000;\n let response = await fetch(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(streamTimeoutMs),\n });\n\n if (!response.ok || !response.body) {\n const errorText = await response.text();\n // Fallback: if model doesn't support native tool calling, retry without tools\n if (response.status === 400 && errorText.includes('does not support tools') && body.tools) {\n logger.warn(COMPONENT, `Model ${model} does not support native tool calling — running in chat-only mode`);\n delete body.tools;\n response = await fetch(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(streamTimeoutMs),\n });\n if (!response.ok || !response.body) {\n const retryText = await response.text();\n yield { type: 'error', error: `Ollama error (${response.status}): ${retryText}` };\n return;\n }\n } else {\n yield { type: 'error', error: `Ollama error (${response.status}): ${errorText}` };\n return;\n }\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n let insideThink = false;\n\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n buffer += decoder.decode(value, { stream: true });\n\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (!line.trim()) continue;\n try {\n const chunk = JSON.parse(line);\n // Handle thinking field for models that put content there\n // Some models (e.g. 
qwen3.5, nemotron-super:cloud) use the thinking field\n // even when think=false is set — treat thinking as content in that case\n if (!chunk.message?.content && chunk.message?.thinking) {\n if (body.think === false) {\n chunk.message.content = chunk.message.thinking;\n }\n }\n if (chunk.message?.content) {\n let text = chunk.message.content;\n // Strip leaked <think>...</think> blocks from Qwen/DeepSeek\n if (text.includes('<think>')) insideThink = true;\n if (insideThink) {\n if (text.includes('</think>')) {\n text = text.split('</think>').pop()?.trim() || '';\n insideThink = false;\n } else {\n continue; // suppress thinking content\n }\n }\n if (text) yield { type: 'text', content: text };\n }\n // v5.0.2: Only yield tool_calls if tools were sent in the request\n if (chunk.message?.tool_calls && options.tools && options.tools.length > 0) {\n for (const tc of chunk.message.tool_calls) {\n const fn = tc.function as Record<string, unknown>;\n yield {\n type: 'tool_call',\n toolCall: { id: uuid(), type: 'function', function: { name: fn.name as string, arguments: JSON.stringify(fn.arguments) } },\n };\n }\n }\n if (chunk.done) break;\n } catch { /* skip malformed NDJSON lines */ }\n }\n }\n yield { type: 'done' };\n } catch (error) {\n yield { type: 'error', error: (error as Error).message };\n }\n }\n\n async listModels(): Promise<string[]> {\n try {\n const response = await fetch(`${this.baseUrl}/api/tags`);\n if (!response.ok) {\n // Hunt Finding #29 (2026-04-14): consume the body even on\n // error paths so the underlying socket can return to the\n // keep-alive pool. 
Without this, every non-200 response\n // leaks its socket until the GC gets around to it.\n await response.body?.cancel().catch(() => {});\n return [];\n }\n const data = await response.json() as { models?: Array<{ name: string }> };\n return (data.models || []).map((m) => m.name);\n } catch {\n return [];\n }\n }\n\n async healthCheck(): Promise<boolean> {\n try {\n const response = await fetch(`${this.baseUrl}/api/tags`);\n const ok = response.ok;\n // Hunt Finding #29 (2026-04-14): ALWAYS consume or cancel the\n // body. Previously we returned response.ok directly, leaving the\n // body stream dangling and the socket held open.\n await response.body?.cancel().catch(() => {});\n return ok;\n } catch {\n return false;\n }\n }\n}\n"],"mappings":";AAGA;AAAA,EACI;AAAA,OAKG;AACP,SAAS,kBAAkB;AAC3B,OAAO,YAAY;AACnB,SAAS,sBAAsB;AAC/B,SAAS,MAAM,YAAY;AAC3B,YAAY,QAAQ;AACpB,SAAS,sBAAsB;AAE/B,MAAM,YAAY;AAOlB,MAAM,kBAA0C;AAAA;AAAA,EAE5C,iBAAiB;AAAA;AAAA,EAEjB,eAAe;AAAA;AAAA,EAEf,mBAAmB;AAAA;AAAA,EAEnB,mBAAmB;AAAA;AAAA,EAEnB,0BAA0B;AAAA;AAAA,EAE1B,sBAAsB;AAAA;AAAA,EAEtB,4BAA4B;AAAA;AAAA,EAE5B,4BAA4B;AAAA,EAC5B,uBAAuB;AAAA;AAAA,EAEvB,oBAAoB;AAAA;AAAA,EAEpB,oBAAoB;AAAA,EACpB,2BAA2B;AAAA;AAAA,EAE3B,mBAAmB;AAAA,EACnB,0BAA0B;AAAA,EAC1B,sBAAsB;AAAA,EACtB,uBAAuB;AAAA;AAAA,EAEvB,0BAA0B;AAAA;AAAA,EAE1B,iCAAiC;AAAA;AAAA,EAEjC,sBAAsB;AAAA;AAAA,EAEtB,sBAAsB;AAAA;AAAA,EAEtB,gBAAgB;AAAA;AAAA,EAEhB,eAAe;AACnB;AAsBA,MAAM,uBAA0C;AAAA,EAC5C,kBAAkB;AAAA,EAClB,mBAAmB;AAAA,EACnB,kBAAkB;AAAA;AAAA,EAClB,iBAAiB;AAAA,EACjB,UAAU;AAAA,EACV,UAAU;AACd;AAMA,SAAS,0BAA0B,WAA2D;AAC1F,QAAM,QAAQ,UAAU,YAAY;AAGpC,MAAI,MAAM,SAAS,QAAQ,KAAK,MAAM,SAAS,QAAQ,GAAG;AACtD,WAAO,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EAC5G;AAGA,QAAM,YAAY,MAAM,MAAM,QAAQ;AACtC,MAAI,WAAW;AACX,UAAM,OAAO,SAAS,UAAU,CAAC,GAAG,EAAE;AACtC,QAAI,QAAQ,IAAI;AACZ,aAAO,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,IAC7G;AAAA,EACJ;AAGA,QAAM,kBAAkB,CAAC,QAAQ,OAAO,YAAY,QAAQ,SAAS,YAAY,YAAY,UAAU,iBAAiB,YAAY,UAAU,QAAQ,gBAAgB;AACtK,a
AAW,UAAU,iBAAiB;AAClC,QAAI,MAAM,SAAS,MAAM,GAAG;AACxB,aAAO,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,IAC7G;AAAA,EACJ;AAGA,SAAO;AACX;AAEA,MAAM,qBAAiE;AAAA;AAAA,EAEnE,WAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,SAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,oBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA,EAGrH,eAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,iBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,iBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASrH,gBAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,KAAK,UAAU,MAAM,UAAU,GAAG;AAAA,EACpJ,cAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,KAAK,UAAU,KAAK;AAAA;AAAA,EAGtI,UAAoB,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,GAAK,UAAU,MAAM,UAAU,GAAG;AAAA,EACpJ,WAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,WAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,SAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACtH,WAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,oBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACpH,mBAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,aAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,aAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA,EAGrH,cAAoB,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACtH,oBAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,kBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA,EAGtH,YAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACtH,YAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACtH,WAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAC1H;AAUA,SAAS,qBAAqB,WAAsC;AAEhE,MAAI;AAEA,UAAM,EAAE,gBAAgB,aAAa,IAAI,QAAQ,kCAAkC;AACn
F,UAAM,QAAQ,eAAe,SAAS,KAAK,eAAe,UAAU,SAAS,EAAE;AAC/E,QAAI,SAAS,CAAC,aAAa,KAAK,GAAG;AAE/B,aAAO;AAAA,QACH,GAAG;AAAA,QACH,kBAAkB,MAAM;AAAA,QACxB,mBAAmB,MAAM,mBAAmB,CAAC,MAAM;AAAA,QACnD,kBAAkB,CAAC,MAAM;AAAA,QACzB,iBAAiB,MAAM,kBAAkB,MAAM;AAAA,QAC/C,UAAU;AAAA,QACV,UAAU;AAAA,MACd;AAAA,IACJ;AAAA,EACJ,QAAQ;AAAA,EAER;AAGJ,QAAM,gBAAgB,oBAAI,IAAY;AAItC,WAAS,uBAAuBA,YAAyB;AACrD,QAAI,cAAc,IAAIA,UAAS,EAAG;AAClC,kBAAc,IAAIA,UAAS;AAE3B,WAAO,wBAAwB,EAC1B,KAAK,CAAC,EAAE,WAAW,MAAM,WAAW,UAAUA,UAAS,EAAE,CAAC,EAC1D,KAAK,CAAC,WAAW,OAAO,kCAAkC,EACtD,KAAK,CAAC,EAAE,kBAAkB,MAAM;AAC7B,wBAAkB,MAAM;AACxB,aAAO,KAAK,WAAW,iCAAiCA,UAAS,iBAAiB,OAAO,eAAe,oBAAoB,OAAO,oBAAoB,EAAE;AAAA,IAC7J,CAAC,CAAC,EACL,MAAM,CAAC,QAAQ,OAAO,KAAK,WAAW,+BAA+BA,UAAS,KAAM,IAAc,OAAO,EAAE,CAAC,EAC5G,QAAQ,MAAM,cAAc,OAAOA,UAAS,CAAC;AAAA,EACtD;AAGI,QAAM,OAAO,UAAU,SAAS,GAAG,IAAI,UAAU,MAAM,GAAG,EAAE,MAAM,CAAC,EAAE,KAAK,GAAG,IAAI;AAEjF,QAAM,QAAQ,KAAK,QAAQ,mCAAmC,EAAE;AAIhE,QAAM,gBAAgB;AACtB,QAAM,WAAW,MAAM,QAAQ,eAAe,EAAE;AAEhD,MAAI;AACJ,MAAI,UAAU;AACd,aAAW,CAAC,SAAS,IAAI,KAAK,OAAO,QAAQ,kBAAkB,GAAG;AAC9D,QAAI,aAAa,WAAW,SAAS,WAAW,OAAO,GAAG;AACtD,UAAI,QAAQ,SAAS,SAAS;AAC1B,oBAAY;AACZ,kBAAU,QAAQ;AAAA,MACtB;AAAA,IACJ;AAAA,EACJ;AAEA,MAAI,CAAC,WAAW;AAEZ,UAAM,WAAW,0BAA0B,SAAS;AACpD,QAAI,UAAU;AACV,aAAO,KAAK,WAAW,UAAU,SAAS,8DAAyD,KAAK,UAAU,QAAQ,CAAC,EAAE;AAC7H,kBAAY;AAAA,IAChB,OAAO;AACH,aAAO,KAAK,WAAW,UAAU,SAAS,+GAA0G;AACpJ,6BAAuB,SAAS;AAAA,IACpC;AAAA,EACJ;AACA,SAAO,EAAE,GAAG,sBAAsB,GAAI,aAAa,CAAC,EAAG;AAC3D;AAGA,SAAS,YAAY,WAA2B;AAC5C,QAAM,OAAO,UAAU,SAAS,GAAG,IAAI,UAAU,MAAM,GAAG,EAAE,MAAM,CAAC,EAAE,KAAK,GAAG,IAAI;AACjF,MAAI,gBAAgB,IAAI,EAAG,QAAO,gBAAgB,IAAI;AAGtD,MAAI,KAAK,SAAS,QAAQ,KAAK,KAAK,SAAS,QAAQ,EAAG,QAAO;AAG/D,QAAM,YAAY,KAAK,MAAM,SAAS;AACtC,MAAI,WAAW;AACX,UAAM,OAAO,SAAS,UAAU,CAAC,GAAG,EAAE;AACtC,QAAI,QAAQ,GAAI,QAAO;AACvB,QAAI,QAAQ,GAAI,QAAO;AACvB,QAAI,QAAQ,GAAI,QAAO;AAAA,EAC3B;AAGA,SAAO;AACX;AAMA,MAAM,0BAA0B;AAYhC,SAAS,qBAAqB,SAAiB,aAAoE;AAC/G,MAAI,QAAQ,UAAU,wBAAyB,QAAO;AAEtD,QAAM,WAAqB,CAAC;AAG5B,QAAM,gBAAgB,QAAQ,MAAM,+DAA+D;AACnG,M
AAI,eAAe;AACf,aAAS,KAAK,cAAc,CAAC,EAAE,KAAK,CAAC;AAAA,EACzC,OAAO;AACH,aAAS,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,8EASmD;AAAA,EACrE;AAGA,QAAM,gBAAgB,QAAQ,MAAM,6CAA6C;AACjF,MAAI,cAAe,UAAS,KAAK,cAAc,CAAC,EAAE,KAAK,CAAC;AAGxD,WAAS,KAAK,qNAAqN;AACnO,WAAS,KAAK,+LAAqL;AAInM,MAAI,eAAe,YAAY,SAAS,GAAG;AACvC,UAAM,cAAc;AACpB,UAAM,YAAsB,CAAC;AAC7B,QAAI,YAAY;AAChB,eAAW,KAAK,aAAa;AAEzB,YAAM,OAAO,EAAE,YAAY,SAAS,MAAM,EAAE,YAAY,MAAM,GAAG,GAAG,IAAI,QAAQ,EAAE;AAClF,YAAM,OAAO,OAAO,EAAE,IAAI,OAAO,IAAI;AACrC,UAAI,YAAY,KAAK,SAAS,YAAa;AAC3C,gBAAU,KAAK,IAAI;AACnB,mBAAa,KAAK;AAAA,IACtB;AACA,QAAI,UAAU,SAAS,GAAG;AACtB,eAAS,KAAK;AAAA,EAAoB,UAAU,KAAK,IAAI,CAAC,EAAE;AAAA,IAC5D;AAAA,EACJ;AAEA,QAAM,aAAa,SAAS,KAAK,MAAM;AAEvC,MAAI,WAAW,UAAU,QAAQ,QAAQ;AACrC,WAAO,KAAK,WAAW,sCAAsC,WAAW,MAAM,OAAO,QAAQ,MAAM,6BAA6B;AAChI,WAAO,QAAQ,MAAM,GAAG,uBAAuB;AAAA,EACnD;AACA,SAAO,KAAK,WAAW,6CAA6C,QAAQ,MAAM,WAAM,WAAW,MAAM,QAAQ;AACjH,SAAO;AACX;AAOA,SAAS,wBAAwB,MAAsC,UAAkD;AACrH,QAAM,aAAa,KAAK,OAAO,OAAK,EAAE,SAAS,QAAQ;AACvD,QAAM,YAAY,KAAK,OAAO,OAAK,EAAE,SAAS,QAAQ;AACtD,QAAM,eAAe,WAAW,WAAW;AAE3C,MAAI,UAAU,UAAU,aAAc,QAAO;AAG7C,QAAM,OAAuC,CAAC;AAC9C,MAAI,IAAI,UAAU,SAAS;AAC3B,SAAO,KAAK,KAAK,KAAK,SAAS,cAAc;AACzC,UAAM,MAAM,UAAU,CAAC;AACvB,QAAI,IAAI,SAAS,QAAQ;AAErB,WAAK,QAAQ,GAAG;AAChB,eAAS,IAAI,IAAI,GAAG,KAAK,GAAG,KAAK;AAC7B,YAAI,UAAU,CAAC,EAAE,SAAS,gBAAgB,UAAU,CAAC,EAAE,cAAc,UAAU,CAAC,EAAE,YAAY;AAC1F,eAAK,QAAQ,UAAU,CAAC,CAAC;AACzB,cAAI,IAAI;AACR;AAAA,QACJ;AACA,YAAI,UAAU,CAAC,EAAE,SAAS,QAAQ;AAE9B,eAAK,QAAQ,UAAU,CAAC,CAAC;AAAA,QAC7B,OAAO;AACH,cAAI;AACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,OAAO;AACH,WAAK,QAAQ,GAAG;AAChB;AAAA,IACJ;AAAA,EACJ;AAEA,SAAO,CAAC,GAAG,YAAY,GAAG,IAAI;AAClC;AAMA,SAAS,eAAe,QAAsE;AAC1F,MAAI,CAAC,OAAQ,QAAO,EAAE,MAAM,UAAU,YAAY,CAAC,EAAE;AACrD,QAAM,QAAiC,EAAE,MAAM,OAAO,QAAQ,SAAS;AACvE,MAAI,OAAO,YAAY;AACnB,UAAM,QAAiC,CAAC;AACxC,eAAW,CAAC,KAAK,GAAG,KAAK,OAAO,QAAQ,OAAO,UAAqD,GAAG;AAEnG,YAAM,OAAgC,EAAE,MAAM,IAAI,QAAQ,SAAS;AACnE,UAAI,IAAI,YAAa,MAAK,cAAc,IAAI;AAC5C,UAAI,IAAI,KAAM,MAAK,OAAO,IAAI;AAC9B,UAAI,IAAI,YA
AY,OAAW,MAAK,UAAU,IAAI;AAClD,YAAM,GAAG,IAAI;AAAA,IACjB;AACA,UAAM,aAAa;AAAA,EACvB;AACA,MAAI,OAAO,SAAU,OAAM,WAAW,OAAO;AAC7C,SAAO;AACX;AAEO,MAAM,uBAAuB,YAAY;AAAA,EACnC,OAAO;AAAA,EACP,cAAc;AAAA,EAEvB,IAAY,UAAkB;AAC1B,UAAM,SAAS,WAAW;AAC1B,WAAO,OAAO,UAAU,OAAO,WAAW,QAAQ,IAAI,mBAAmB;AAAA,EAC7E;AAAA,EAEA,MAAM,KAAK,SAA6C;AACpD,UAAM,SAAS,QAAQ,SAAS,YAAY,QAAQ,WAAW,EAAE;AACjE,WAAO,MAAM,WAAW,uBAAuB,KAAK,cAAc,QAAQ,SAAS,MAAM,EAAE;AAG3F,UAAM,eAAe,MAAM,SAAS,QAAQ,KAAK,MAAM,SAAS,QAAQ;AACxE,UAAM,WAAW,QAAQ,SAAS,QAAQ,MAAM,SAAS;AACzD,UAAM,sBAAsB,QAAQ,SAAS,KAAK,OAAK,EAAE,SAAS,MAAM;AAExE,UAAM,OAAgC;AAAA,MAClC;AAAA,MACA,UAAU,QAAQ,SAAS,IAAI,CAAC,MAAM;AAClC,cAAM,MAA+B,EAAE,MAAM,EAAE,KAAK;AAIpD,YAAI,EAAE,SAAS,YAAY,gBAAgB,UAAU;AACjD,gBAAM,mBAAmB,QAAQ,SAAS,CAAC,GACtC,OAAO,QAAM,EAAE,SAAS,aAAa,UAAU,KAAK,GAAG,EACvD,IAAI,QAAM,EAAE,MAAM,EAAE,SAAS,MAAM,aAAa,EAAE,SAAS,eAAe,GAAG,EAAE;AACpF,cAAI,UAAU,qBAAqB,EAAE,SAAS,gBAAgB,SAAS,IAAI,kBAAkB,MAAS;AAAA,QAC1G,OAAO;AACH,cAAI,UAAU,EAAE;AAAA,QACpB;AACA,YAAI,EAAE,aAAa,EAAE,UAAU,SAAS,GAAG;AACvC,cAAI,aAAa,EAAE,UAAU,IAAI,QAAM;AACnC,gBAAI,aAAsC,CAAC;AAC3C,gBAAI;AACA,2BAAa,KAAK,MAAM,GAAG,SAAS,aAAa,IAAI;AAAA,YACzD,QAAQ;AACJ,qBAAO,KAAK,WAAW,gCAAgC,GAAG,SAAS,IAAI,oBAAoB;AAAA,YAC/F;AAMA,kBAAM,UAAU,GAAG,SAAS,QAAQ,IAAI,KAAK,KAAK;AAClD,kBAAM,MAA+B;AAAA,cACjC,IAAI,GAAG;AAAA,cACP,MAAM,GAAG,QAAQ;AAAA,cACjB,UAAU;AAAA,gBACN,MAAM;AAAA,gBACN,WAAW;AAAA,cACf;AAAA,YACJ;AAKA,gBAAI,GAAG,kBAAkB;AACrB,cAAC,IAAI,SAAqC,oBAAoB,GAAG;AACjE,kBAAI,oBAAoB,GAAG;AAAA,YAC/B;AACA,mBAAO;AAAA,UACX,CAAC;AAAA,QACL;AACA,YAAI,EAAE,WAAY,KAAI,eAAe,EAAE;AAGvC,YAAI,EAAE,SAAS,QAAQ;AACnB,gBAAM,YAAY,EAAE,QAAQ,IAAI,KAAK,KAAK;AAC1C,cAAI,OAAO;AAAA,QACf,WAAW,EAAE,MAAM;AACf,cAAI,OAAO,EAAE;AAAA,QACjB;AACA,eAAO;AAAA,MACX,CAAC;AAAA,MACD,QAAQ;AAAA,MACR,YAAY;AAAA,MACZ,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QASL,aAAa,eAAe,QAAQ,SAAS,mBAAmB,QAAQ,SAAS;AAAA,QACjF,SAAS,YAAY,KAAK;AAAA,QAC1B,aAAa,QAAQ,eAAe;AAAA,MACxC;AAAA,IACJ;AAGA,UAAM,OAAO,qBAAqB,KAAK;AAOvC,QAAI,QAAQ,aAAa,OAAO;AAC5B,WAAK,QAAQ;AAAA,IACjB,WAAW,CAAC,KAAK,
mBAAmB;AAGhC,WAAK,QAAQ;AAAA,IACjB,WAAW,QAAQ,aAAa,MAAM;AAClC,WAAK,QAAQ;AAAA,IACjB;AAUA,QAAI,uBAAuB,KAAK,UAAU,OAAO;AAC7C,YAAM,cAAc,KAAK,UAAU,SAAY,UAAU,OAAO,KAAK,KAAK;AAC1E,WAAK,QAAQ;AACb,aAAO,KAAK,WAAW,mDAAmD,KAAK,8CAA8C,WAAW,GAAG;AAAA,IAC/I;AAEA,QAAI,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AAC3C,WAAK,QAAQ,QAAQ,MAAM,IAAI,CAAC,OAAO;AAAA,QACnC,MAAM;AAAA,QACN,UAAU;AAAA,UACN,MAAM,EAAE,SAAS;AAAA,UACjB,aAAa,EAAE,SAAS;AAAA,UACxB,YAAY,eAAe,EAAE,SAAS,UAAU;AAAA,QACpD;AAAA,MACJ,EAAE;AAEF,MAAC,KAAK,QAAoC,cAAc,QAAQ,eAAe,KAAK,mBAAmB;AACvG,UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAC1E,UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAI1E,UAAI,QAAQ,gBAAgB,CAAC,KAAK,kBAAkB;AAChD,aAAK,cAAc;AACnB,eAAO,KAAK,WAAW,yDAAyD,KAAK,8CAA8C;AAAA,MACvI,WAAW,QAAQ,gBAAgB,KAAK,kBAAkB;AACtD,eAAO,KAAK,WAAW,uEAAuE,KAAK,iCAA4B;AAAA,MACnI;AAAA,IACJ;AAIA,QAAI,QAAQ,WAAW,QAAW;AAC9B,WAAK,SAAS,QAAQ;AAAA,IAC1B;AAKA,QAAI,gBAAgB,UAAU;AAC1B,YAAM,OAAO,KAAK;AAClB,UAAI,KAAK,UAAU,IAAI;AACnB,cAAM,UAAU,wBAAwB,MAAM,EAAE;AAChD,eAAO,KAAK,WAAW,6BAA6B,KAAK,MAAM,WAAM,QAAQ,MAAM,WAAW;AAC9F,aAAK,WAAW;AAAA,MACpB;AAAA,IACJ;AAIA,QAAI,YAAY,KAAK,kBAAkB;AACnC,YAAM,OAAO,KAAK;AAClB,YAAM,SAAS,KAAK,UAAU,OAAK,EAAE,SAAS,QAAQ;AACtD,YAAM,eAAe,KAAK,UAAU,OAAK,EAAE,SAAS,MAAM;AAC1D,UAAI,UAAU,KAAK,gBAAgB,KAAK,KAAK,MAAM,EAAE,SAAS;AAC1D,cAAM,aAAa,KAAK,MAAM,EAAE;AAChC,aAAK,YAAY,EAAE,UAAU;AAAA,EAA0B,UAAU;AAAA;AAAA;AAAA,EAAuB,KAAK,YAAY,EAAE,OAAO;AAClH,aAAK,OAAO,QAAQ,CAAC;AACrB,eAAO,KAAK,WAAW,4EAA4E;AAAA,MACvG;AAAA,IACJ;AAEA,UAAM,eAAe,KAAK;AAC1B,UAAM,YAAY,KAAK,QAAS,KAAK,MAA4C,IAAI,OAAK,EAAE,SAAS,IAAI,IAAI,CAAC;AAC9G,WAAO,KAAK,WAAW,uBAAuB,KAAK,WAAW,YAAY,YAAY,UAAU,KAAK,GAAG,CAAC,YAAY,KAAK,KAAK,cAAc,aAAa,MAAM,EAAE;AAElK,QAAI,QAAQ,IAAI,qBAAqB,OAAO,MAAM,SAAS,QAAQ,GAAG;AAClE,aAAO,MAAM,WAAW,gDAAgD,KAAK,gCAAgC;AAC7G,UAAI;AACA,WAAG,cAAc,8BAA8B,KAAK,UAAU,MAAM,MAAM,CAAC,CAAC;AAAA,MAChF,SAAS,GAAG;AACR,eAAO,MAAM,WAAW,wBAAwB,CAAC,EAAE;AAAA,MACvD;AAAA,IACJ;AAGA,UAAM,YAAY,eAAe,MAAU;AAC3C,QAAI,WAAW,MAAM,eAAe,GAAG,KAAK,OAAO,aAAa;AAAA,MAC5D,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9
C,MAAM,KAAK,UAAU,IAAI;AAAA,IAC7B,GAAG,EAAE,UAAU,CAAC;AAEhB,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK;AAItC,UAAI,SAAS,WAAW,OAAO,KAAK,UAChC,UAAU,SAAS,wBAAwB,KAC3C,UAAU,SAAS,cAAc,KACjC,UAAU,SAAS,UAAU,KAC7B,UAAU,SAAS,cAAc,IAClC;AACC,eAAO,KAAK,WAAW,SAAS,KAAK,wEAAmE;AACxG,eAAO,KAAK;AACZ,mBAAW,MAAM,eAAe,GAAG,KAAK,OAAO,aAAa;AAAA,UACxD,QAAQ;AAAA,UACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,UAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,QAC7B,GAAG,EAAE,UAAU,CAAC;AAChB,YAAI,CAAC,SAAS,IAAI;AACd,gBAAM,YAAY,MAAM,SAAS,KAAK;AAItC,gBAAM,EAAE,oBAAoB,IAAI,MAAM,OAAO,oBAAoB;AACjE,gBAAM,oBAAoB,UAAU,UAAU,WAAW,EAAE,UAAU,UAAU,MAAM,CAAC;AAAA,QAC1F;AAAA,MACJ,OAAO;AACH,cAAM,EAAE,oBAAoB,IAAI,MAAM,OAAO,oBAAoB;AACjE,cAAM,oBAAoB,UAAU,UAAU,WAAW,EAAE,UAAU,UAAU,MAAM,CAAC;AAAA,MAC1F;AAAA,IACJ;AAMA,QAAI;AACA,YAAM,EAAE,cAAc,IAAI,MAAM,OAAO,uBAAuB;AAC9D,oBAAc,UAAU,SAAS,OAAO;AAAA,IAC5C,QAAQ;AAAA,IAA8C;AAEtD,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,UAAM,UAAU,KAAK;AACrB,WAAO,KAAK,WAAW,iBAAiB,KAAK,gBAAgB,KAAK,UAAU,QAAQ,UAAU,CAAC,qBAAsB,QAAQ,WAAsB,IAAI,MAAM,EAAE;AAC/J,UAAM,YAAwB,CAAC;AAM/B,QAAI,QAAQ,cAAc,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AACjE,iBAAW,MAAM,QAAQ,YAA8C;AACnE,cAAM,KAAK,GAAG;AAGd,cAAM,aAAc,GAAG,qBAClB,GAAG,oBACH,GAAG,qBACH,GAAG;AACR,kBAAU,KAAK;AAAA,UACX,IAAI,KAAK;AAAA,UACT,MAAM;AAAA,UACN,UAAU;AAAA,YACN,MAAM,GAAG;AAAA,YACT,WAAW,KAAK,UAAU,GAAG,SAAS;AAAA,UAC1C;AAAA,UACA,GAAI,aAAa,EAAE,kBAAkB,WAAW,IAAI,CAAC;AAAA,QACzD,CAAC;AAAA,MACL;AAAA,IACJ;AAGA,QAAI,QAAQ,SAAS,UAAU,SAAS,GAAG;AACvC,YAAM,aAAa,IAAI,IAAI,QAAQ,MAAM,IAAI,OAAK,EAAE,SAAS,IAAI,CAAC;AAClE,YAAM,UAAU,UAAU,OAAO,QAAM,CAAC,WAAW,IAAI,GAAG,SAAS,IAAI,CAAC;AACxE,UAAI,QAAQ,SAAS,GAAG;AACpB,eAAO,KAAK,WAAW,2CAA2C,QAAQ,MAAM,kBAAkB,QAAQ,IAAI,QAAM,GAAG,SAAS,IAAI,EAAE,KAAK,IAAI,CAAC,0DAA0D;AAAA,MAC9M;AAAA,IACJ;AAMA,QAAI,UAAW,QAAQ,WAAsB;AAC7C,QAAI,CAAC,WAAW,QAAQ,UAAU;AAC9B,YAAM,WAAY,QAAQ,YAAuB;AACjD,UAAI,SAAS,SAAS,GAAG;AACrB,eAAO,KAAK,WAAW,2DAA2D,SAAS,MAAM,SAAS;AAC1G,kBAAU;AAAA,MACd;AAAA,IACJ;AAEA,cAAU,QAAQ,QAAQ,0BAA0B,EAAE,EAAE,KAAK;AAE7D,WAAO;AAAA,MACH,IAAI,KAAK;AAAA,MACT;AAAA,MACA,WAAW,UAAU,
SAAS,IAAI,YAAY;AAAA,MAC9C,OAAO;AAAA,QACH,cAAe,KAAK,qBAAgC;AAAA,QACpD,kBAAmB,KAAK,cAAyB;AAAA,QACjD,cAAe,KAAK,qBAAgC,MAAO,KAAK,cAAyB;AAAA,MAC7F;AAAA,MACA,cAAc,UAAU,SAAS,IAAI,eAAe;AAAA,MACpD,OAAO,UAAU,KAAK;AAAA,IAC1B;AAAA,EACJ;AAAA,EAEA,OAAO,WAAW,SAAuD;AACrE,UAAM,SAAS,QAAQ,SAAS,YAAY,QAAQ,WAAW,EAAE;AAGjE,UAAM,eAAe,MAAM,SAAS,QAAQ,KAAK,MAAM,SAAS,QAAQ;AACxE,UAAM,WAAW,QAAQ,SAAS,QAAQ,MAAM,SAAS;AACzD,UAAM,sBAAsB,QAAQ,SAAS,KAAK,OAAK,EAAE,SAAS,MAAM;AAExE,UAAM,OAAgC;AAAA,MAClC;AAAA,MACA,UAAU,QAAQ,SAAS,IAAI,CAAC,MAAM;AAClC,cAAM,MAA+B,EAAE,MAAM,EAAE,KAAK;AAEpD,YAAI,EAAE,SAAS,YAAY,gBAAgB,UAAU;AACjD,gBAAM,mBAAmB,QAAQ,SAAS,CAAC,GACtC,OAAO,QAAM,EAAE,SAAS,aAAa,UAAU,KAAK,GAAG,EACvD,IAAI,QAAM,EAAE,MAAM,EAAE,SAAS,MAAM,aAAa,EAAE,SAAS,eAAe,GAAG,EAAE;AACpF,cAAI,UAAU,qBAAqB,EAAE,SAAS,gBAAgB,SAAS,IAAI,kBAAkB,MAAS;AAAA,QAC1G,OAAO;AACH,cAAI,UAAU,EAAE;AAAA,QACpB;AACA,YAAI,EAAE,aAAa,EAAE,UAAU,SAAS,GAAG;AACvC,cAAI,aAAa,EAAE,UAAU,IAAI,SAAO;AAAA,YACpC,IAAI,GAAG;AAAA,YACP,MAAM,GAAG,QAAQ;AAAA,YACjB,UAAU,EAAE,MAAM,GAAG,SAAS,MAAM,WAAW,KAAK,MAAM,GAAG,SAAS,aAAa,IAAI,EAAE;AAAA,UAC7F,EAAE;AAAA,QACN;AACA,YAAI,EAAE,WAAY,KAAI,eAAe,EAAE;AAEvC,YAAI,EAAE,SAAS,QAAQ;AACnB,cAAI,OAAO,EAAE,QAAQ;AAAA,QACzB,WAAW,EAAE,MAAM;AACf,cAAI,OAAO,EAAE;AAAA,QACjB;AACA,eAAO;AAAA,MACX,CAAC;AAAA,MACD,QAAQ;AAAA,MACR,YAAY;AAAA,MACZ,SAAS;AAAA;AAAA,QAEL,aAAa,eAAe,QAAQ,SAAS,mBAAmB,QAAQ,SAAS;AAAA,QACjF,SAAS,YAAY,KAAK;AAAA,QAC1B,aAAa,QAAQ,eAAe;AAAA,MACxC;AAAA,IACJ;AAGA,UAAM,OAAO,qBAAqB,KAAK;AAIvC,QAAI,QAAQ,aAAa,OAAO;AAC5B,WAAK,QAAQ;AAAA,IACjB,WAAW,CAAC,KAAK,mBAAmB;AAChC,WAAK,QAAQ;AAAA,IACjB,WAAW,QAAQ,aAAa,MAAM;AAClC,WAAK,QAAQ;AAAA,IACjB;AAGA,QAAI,uBAAuB,KAAK,UAAU,OAAO;AAC7C,YAAM,cAAc,KAAK,UAAU,SAAY,UAAU,OAAO,KAAK,KAAK;AAC1E,WAAK,QAAQ;AACb,aAAO,KAAK,WAAW,4DAA4D,KAAK,8CAA8C,WAAW,GAAG;AAAA,IACxJ;AAEA,QAAI,UAAU;AACV,WAAK,QAAQ,QAAQ,MAAO,IAAI,CAAC,OAAO;AAAA,QACpC,MAAM;AAAA,QACN,UAAU;AAAA,UACN,MAAM,EAAE,SAAS;AAAA,UACjB,aAAa,EAAE,SAAS;AAAA,UACxB,YAAY,eAAe,EAAE,SAAS,UAAU;AAAA,QACpD;AAAA,MACJ,EAAE;AAEF,MAAC,KAAK,QAAoC,cAAc,QAAQ,eAAe,KAAK,mBAAmB;AACvG,
UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAC1E,UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAG1E,UAAI,QAAQ,gBAAgB,CAAC,KAAK,kBAAkB;AAChD,aAAK,cAAc;AAAA,MACvB;AAAA,IACJ;AAGA,QAAI,QAAQ,WAAW,QAAW;AAC9B,WAAK,SAAS,QAAQ;AAAA,IAC1B;AAGA,QAAI,gBAAgB,UAAU;AAC1B,YAAM,OAAO,KAAK;AAClB,UAAI,KAAK,SAAS,IAAI;AAClB,cAAM,UAAU,wBAAwB,MAAM,EAAE;AAChD,eAAO,KAAK,WAAW,sCAAsC,KAAK,MAAM,WAAM,QAAQ,MAAM,WAAW;AACvG,aAAK,WAAW;AAAA,MACpB;AAAA,IACJ;AAEA,QAAI,YAAY,KAAK,kBAAkB;AACnC,YAAM,QAAQ,KAAK;AACnB,YAAM,SAAS,MAAM,UAAU,OAAK,EAAE,SAAS,QAAQ;AACvD,YAAM,eAAe,MAAM,UAAU,OAAK,EAAE,SAAS,MAAM;AAC3D,UAAI,UAAU,KAAK,gBAAgB,KAAK,MAAM,MAAM,EAAE,SAAS;AAC3D,cAAM,aAAa,MAAM,MAAM,EAAE;AACjC,cAAM,YAAY,EAAE,UAAU;AAAA,EAA0B,UAAU;AAAA;AAAA;AAAA,EAAuB,MAAM,YAAY,EAAE,OAAO;AACpH,cAAM,OAAO,QAAQ,CAAC;AAAA,MAC1B;AAAA,IACJ;AAEA,QAAI;AAEA,YAAM,kBAAkB,eAAe,MAAU;AACjD,UAAI,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,aAAa;AAAA,QACnD,QAAQ;AAAA,QACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,QAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,QACzB,QAAQ,YAAY,QAAQ,eAAe;AAAA,MAC/C,CAAC;AAED,UAAI,CAAC,SAAS,MAAM,CAAC,SAAS,MAAM;AAChC,cAAM,YAAY,MAAM,SAAS,KAAK;AAEtC,YAAI,SAAS,WAAW,OAAO,UAAU,SAAS,wBAAwB,KAAK,KAAK,OAAO;AACvF,iBAAO,KAAK,WAAW,SAAS,KAAK,wEAAmE;AACxG,iBAAO,KAAK;AACZ,qBAAW,MAAM,MAAM,GAAG,KAAK,OAAO,aAAa;AAAA,YAC/C,QAAQ;AAAA,YACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,YAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,YACzB,QAAQ,YAAY,QAAQ,eAAe;AAAA,UAC/C,CAAC;AACD,cAAI,CAAC,SAAS,MAAM,CAAC,SAAS,MAAM;AAChC,kBAAM,YAAY,MAAM,SAAS,KAAK;AACtC,kBAAM,EAAE,MAAM,SAAS,OAAO,iBAAiB,SAAS,MAAM,MAAM,SAAS,GAAG;AAChF;AAAA,UACJ;AAAA,QACJ,OAAO;AACH,gBAAM,EAAE,MAAM,SAAS,OAAO,iBAAiB,SAAS,MAAM,MAAM,SAAS,GAAG;AAChF;AAAA,QACJ;AAAA,MACJ;AAEA,YAAM,SAAS,SAAS,KAAK,UAAU;AACvC,YAAM,UAAU,IAAI,YAAY;AAChC,UAAI,SAAS;AACb,UAAI,cAAc;AAElB,aAAO,MAAM;AACT,cAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,YAAI,KAAM;AACV,kBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAEhD,cAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,iBAAS,MAAM,IAAI,KAAK;AAExB,mBAAW,QAAQ,OAAO;AACtB,cAAI,CAAC,KAAK,KAAK,EAAG;AAClB,cAAI;AACA,kBAAM,QAAQ,KAAK,MAAM,IAAI;AAI7B,gBAAI,CAAC,MAAM,SAAS,WAAW,MAAM,SAAS,U
AAU;AACpD,kBAAI,KAAK,UAAU,OAAO;AACtB,sBAAM,QAAQ,UAAU,MAAM,QAAQ;AAAA,cAC1C;AAAA,YACJ;AACA,gBAAI,MAAM,SAAS,SAAS;AACxB,kBAAI,OAAO,MAAM,QAAQ;AAEzB,kBAAI,KAAK,SAAS,SAAS,EAAG,eAAc;AAC5C,kBAAI,aAAa;AACb,oBAAI,KAAK,SAAS,UAAU,GAAG;AAC3B,yBAAO,KAAK,MAAM,UAAU,EAAE,IAAI,GAAG,KAAK,KAAK;AAC/C,gCAAc;AAAA,gBAClB,OAAO;AACH;AAAA,gBACJ;AAAA,cACJ;AACA,kBAAI,KAAM,OAAM,EAAE,MAAM,QAAQ,SAAS,KAAK;AAAA,YAClD;AAEA,gBAAI,MAAM,SAAS,cAAc,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AACxE,yBAAW,MAAM,MAAM,QAAQ,YAAY;AACvC,sBAAM,KAAK,GAAG;AACd,sBAAM;AAAA,kBACF,MAAM;AAAA,kBACN,UAAU,EAAE,IAAI,KAAK,GAAG,MAAM,YAAY,UAAU,EAAE,MAAM,GAAG,MAAgB,WAAW,KAAK,UAAU,GAAG,SAAS,EAAE,EAAE;AAAA,gBAC7H;AAAA,cACJ;AAAA,YACJ;AACA,gBAAI,MAAM,KAAM;AAAA,UACpB,QAAQ;AAAA,UAAoC;AAAA,QAChD;AAAA,MACJ;AACA,YAAM,EAAE,MAAM,OAAO;AAAA,IACzB,SAAS,OAAO;AACZ,YAAM,EAAE,MAAM,SAAS,OAAQ,MAAgB,QAAQ;AAAA,IAC3D;AAAA,EACJ;AAAA,EAEA,MAAM,aAAgC;AAClC,QAAI;AACA,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,WAAW;AACvD,UAAI,CAAC,SAAS,IAAI;AAKd,cAAM,SAAS,MAAM,OAAO,EAAE,MAAM,MAAM;AAAA,QAAC,CAAC;AAC5C,eAAO,CAAC;AAAA,MACZ;AACA,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,cAAQ,KAAK,UAAU,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,IAAI;AAAA,IAChD,QAAQ;AACJ,aAAO,CAAC;AAAA,IACZ;AAAA,EACJ;AAAA,EAEA,MAAM,cAAgC;AAClC,QAAI;AACA,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,WAAW;AACvD,YAAM,KAAK,SAAS;AAIpB,YAAM,SAAS,MAAM,OAAO,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC5C,aAAO;AAAA,IACX,QAAQ;AACJ,aAAO;AAAA,IACX;AAAA,EACJ;AACJ;","names":["modelName"]}
1
+ {"version":3,"sources":["../../src/providers/ollama.ts"],"sourcesContent":["/**\n * TITAN — Ollama Provider (Local LLMs)\n */\nimport {\n LLMProvider,\n type ChatOptions,\n type ChatResponse,\n type ChatStreamChunk,\n type ToolCall,\n} from './base.js';\nimport { loadConfig } from '../config/config.js';\nimport logger from '../utils/logger.js';\nimport { fetchWithRetry } from '../utils/helpers.js';\nimport { v4 as uuid } from 'uuid';\nimport * as fs from 'fs';\nimport { clampMaxTokens } from './modelCapabilities.js';\n\nconst COMPONENT = 'Ollama';\n\n/**\n * Per-model context window map for Ollama cloud models.\n * Auto-configures num_ctx to each model's actual maximum to prevent truncation.\n * Sources: Ollama Cloud model cards, March 2026.\n */\nconst CLOUD_MODEL_CTX: Record<string, number> = {\n // GLM-5.1 — 198K context (newest agentic flagship, SOTA SWE-Bench Pro)\n 'glm-5.1:cloud': 198656,\n // GLM-5 — 128K context\n 'glm-5:cloud': 131072,\n // Kimi K2.5 — 256K context (native multimodal agentic)\n 'kimi-k2.5:cloud': 262144,\n // Kimi K2.6 — 256K context (next-gen agentic, enhanced reasoning)\n 'kimi-k2.6:cloud': 262144,\n // Qwen3 Coder Next — 262K context (massive)\n 'qwen3-coder-next:cloud': 262144,\n // Qwen3.5 397B Cloud — 256K context (all variants support 256K)\n 'qwen3.5:397b-cloud': 262144,\n // DeepSeek V3.1 — 128K context\n 'deepseek-v3.1:671b-cloud': 131072,\n // DeepSeek V3.2 — 160K context (DSA long-context optimized)\n 'deepseek-v3.2:671b-cloud': 163840,\n 'deepseek-v3.2:cloud': 163840,\n // Devstral 2 — 128K context\n 'devstral-2:cloud': 131072,\n // Devstral Small 2 (local) — 32K\n 'devstral-small-2': 32768,\n 'devstral-small-2:latest': 32768,\n // Nemotron 3 Nano — 1M native, 32K practical for local\n 'nemotron-3-nano': 32768,\n 'nemotron-3-nano:latest': 32768,\n 'nemotron-3-nano:4b': 32768,\n 'nemotron-3-nano:30b': 32768,\n // Nemotron 3 Super — 256K context (MoE 120B/12B active)\n 'nemotron-3-super:cloud': 262144,\n // Gemini 3 Flash — 
1M context\n 'gemini-3-flash-preview:latest': 1048576,\n // GPT OSS — 128K\n 'gpt-oss:120b-cloud': 131072,\n // MiniMax M2.7 — 200K context (Agent Teams, dynamic tool search)\n 'minimax-m2.7:cloud': 204800,\n // Gemma 4 — 256K context (native function calling)\n 'gemma4:cloud': 262144,\n // Qwen3.5 35B local — 32K\n 'qwen3.5:35b': 32768,\n};\n\n/**\n * Model capability profiles — controls how TITAN adapts to each model's strengths.\n * Instead of blanket rules for all models, each model gets tuned behavior.\n *\n * selfSelectsTools: Model picks tools well on its own — don't force tool_choice='required'\n * thinkingWithTools: Model benefits from thinking (<think> tags) during tool calling\n * needsSystemMerge: Model ignores standalone system messages — merge into first user msg\n * toolTemperature: Optimal temperature for tool-calling tasks (null = use caller's value or 0.5 default)\n * toolTopP: Optimal top_p for tool calling (null = omit)\n * toolTopK: Optimal top_k for tool calling (null = omit)\n */\ninterface ModelCapabilities {\n selfSelectsTools: boolean;\n thinkingWithTools: boolean;\n needsSystemMerge: boolean;\n toolTemperature: number | null;\n toolTopP: number | null;\n toolTopK: number | null;\n}\n\nconst DEFAULT_CAPABILITIES: ModelCapabilities = {\n selfSelectsTools: false,\n thinkingWithTools: false,\n needsSystemMerge: true, // Conservative default: merge for unknown models\n toolTemperature: 0.5,\n toolTopP: null,\n toolTopK: null,\n};\n\n/** Heuristic: infer capabilities from model name patterns when no hardcoded\n * entry exists. Most modern models (2024+) support native tool calling and\n * handle system prompts correctly. This prevents unknown models from being\n * crippled by overly conservative defaults. 
*/\nfunction inferCapabilitiesFromName(modelName: string): Partial<ModelCapabilities> | undefined {\n const lower = modelName.toLowerCase();\n\n // Cloud-hosted models are almost always modern and capable\n if (lower.includes(':cloud') || lower.includes('-cloud')) {\n return { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 };\n }\n\n // Large local models (30B+) are typically capable\n const sizeMatch = lower.match(/(\\d+)b/);\n if (sizeMatch) {\n const size = parseInt(sizeMatch[1], 10);\n if (size >= 30) {\n return { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.5 };\n }\n }\n\n // Known-capable families by name pattern (even if not in hardcoded map)\n const capableFamilies = ['qwen', 'glm', 'deepseek', 'kimi', 'gemma', 'nemotron', 'devstral', 'gemini', 'mistral-large', 'llama3.3', 'llama4', 'phi4', 'command-r-plus'];\n for (const family of capableFamilies) {\n if (lower.includes(family)) {\n return { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.5 };\n }\n }\n\n // Truly unknown small local models — stay conservative\n return undefined;\n}\n\nconst MODEL_CAPABILITIES: Record<string, Partial<ModelCapabilities>> = {\n // ── Qwen family — excellent tool calling, uses thinking ──\n 'qwen3.5': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.7 },\n 'qwen3': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.7 },\n 'qwen3-coder-next': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.5 },\n\n // ── DeepSeek family — strong reasoning, good tool use ──\n 'deepseek-v3': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 'deepseek-v3.1': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 
'deepseek-v3.2': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n\n // ── MiniMax M2.7 — XML tool format, needs special handling ──\n // Hunt Finding #05 (2026-04-14): flipped selfSelectsTools from true→false.\n // Confirmed by reproducing: a \"use shell to run uptime\" prompt returned\n // fabricated uptime text with no tool call. The model hallucinates instead\n // of calling tools when given the choice. Setting false forces the agent\n // loop's forceToolUse to fire `tool_choice: required`, which prevents this\n // class of hallucination at the API level.\n 'minimax-m2.7': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.8, toolTopP: 0.95, toolTopK: 40 },\n 'minimax-m2': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.8, toolTopP: 0.95 },\n\n // ── Gemma family — good tool use, no thinking ──\n 'gemma4': { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 1.0, toolTopP: 0.95, toolTopK: 64 },\n 'gemma-3': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.4 },\n\n // ── GLM family — GLM-5.1 is agentic flagship, SOTA SWE-Bench Pro, 198K ctx ──\n 'glm-5.1': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 'glm-5': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.4 },\n 'glm-4.7': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.4 },\n\n // ── Nemotron — Super is 256K MoE optimized for collaborative agents ──\n 'nemotron-3-super': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: true, toolTemperature: 0.4 },\n 'nemotron-3-nano': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n\n // ── Kimi K2.5 / K2.6 — 256K, native 
agentic ──\n 'kimi-k2.5': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n 'kimi-k2.6': { selfSelectsTools: true, thinkingWithTools: true, needsSystemMerge: false, toolTemperature: 0.6 },\n\n // ── Devstral — code-focused ──\n 'devstral-2': { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.4 },\n 'devstral-small-2': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n\n // ── Gemini — handles system messages well ──\n 'gemini-3-flash': { selfSelectsTools: true, thinkingWithTools: false, needsSystemMerge: false, toolTemperature: 0.5 },\n\n // ── Llama/Mistral — weaker tool calling ──\n 'llama3.1': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n 'llama3.2': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n 'mistral': { selfSelectsTools: false, thinkingWithTools: false, needsSystemMerge: true, toolTemperature: 0.3 },\n};\n\n/** Resolve capabilities for a model.\n *\n * Lookup order:\n * 1. Empirical probe result from capabilities registry (~/.titan/model-capabilities.json)\n * — This reflects ACTUAL behavior tested against the live model\n * 2. Hardcoded MODEL_CAPABILITIES map (this file) — matched by longest prefix\n * 3. 
DEFAULT_CAPABILITIES — conservative fallback for unknown models\n */\nfunction getModelCapabilities(modelName: string): ModelCapabilities {\n // Step 1: Check empirical probe registry (preferred)\n try {\n // eslint-disable-next-line @typescript-eslint/no-require-imports\n const { getProbeResult, isProbeStale } = require('../agent/capabilitiesRegistry.js') as typeof import('../agent/capabilitiesRegistry.js');\n const probe = getProbeResult(modelName) || getProbeResult(`ollama/${modelName}`);\n if (probe && !isProbeStale(probe)) {\n // Convert probe result to capability flags\n return {\n ...DEFAULT_CAPABILITIES,\n selfSelectsTools: probe.nativeToolCalls,\n thinkingWithTools: probe.hasThinkingMode && !probe.needsExplicitThinkFalse,\n needsSystemMerge: !probe.respectsSystemPrompt,\n toolTemperature: probe.nativeToolCalls ? 0.5 : 0.3,\n toolTopP: null,\n toolTopK: null,\n };\n }\n } catch {\n // Registry not available (e.g., during tests) — fall through\n }\n\n/** Track which unknown models we've already triggered background probes for */\nconst probeInFlight = new Set<string>();\n\n/** Trigger a background capability probe for an unknown model.\n * Fire-and-forget: the next request will pick up the result from the registry. 
*/\nfunction triggerBackgroundProbe(modelName: string): void {\n if (probeInFlight.has(modelName)) return;\n probeInFlight.add(modelName);\n // Dynamic import to avoid circular deps at module load time\n import('../agent/modelProbe.js')\n .then(({ probeModel }) => probeModel(`ollama/${modelName}`))\n .then((result) => import('../agent/capabilitiesRegistry.js')\n .then(({ recordProbeResult }) => {\n recordProbeResult(result);\n logger.info(COMPONENT, `Background probe complete for ${modelName}: nativeTools=${result.nativeToolCalls}, respectsSystem=${result.respectsSystemPrompt}`);\n }))\n .catch((err) => logger.warn(COMPONENT, `Background probe failed for ${modelName}: ${(err as Error).message}`))\n .finally(() => probeInFlight.delete(modelName));\n}\n\n // Step 2: Hardcoded map (prefix-matched, longest wins)\n const bare = modelName.includes('/') ? modelName.split('/').slice(1).join('/') : modelName;\n // Strip version/tag suffix (e.g. \":cloud\", \":4b\", \":latest\", \":4b-cloud\")\n const noTag = bare.replace(/:(cloud|latest|\\d+b(-cloud)?)$/i, '');\n // Strip known custom name prefixes that operators add (e.g. \"titan-qwen3.5\" → \"qwen3.5\",\n // \"myorg-deepseek-v3\" → \"deepseek-v3\"). 
We do a single pass removing any word-prefix\n // that is NOT itself a known model family, up to the first recognised pattern in the map.\n const knownPrefixes = /^(titan|local|custom|myorg|org|priv|private|dev|test|prod|staging)-/i;\n const baseName = noTag.replace(knownPrefixes, '');\n\n let bestMatch: Partial<ModelCapabilities> | undefined;\n let bestLen = 0;\n for (const [pattern, caps] of Object.entries(MODEL_CAPABILITIES)) {\n if (baseName === pattern || baseName.startsWith(pattern)) {\n if (pattern.length > bestLen) {\n bestMatch = caps;\n bestLen = pattern.length;\n }\n }\n }\n\n if (!bestMatch) {\n // Try heuristic inference from model name before falling back to defaults\n const inferred = inferCapabilitiesFromName(modelName);\n if (inferred) {\n logger.info(COMPONENT, `Model \"${modelName}\" not in hardcoded map — using inferred capabilities: ${JSON.stringify(inferred)}`);\n bestMatch = inferred;\n } else {\n logger.info(COMPONENT, `Model \"${modelName}\" not in capabilities database or registry — using conservative defaults. Triggering background probe...`);\n triggerBackgroundProbe(modelName);\n }\n }\n return { ...DEFAULT_CAPABILITIES, ...(bestMatch || {}) };\n}\n\n/** Get the optimal num_ctx for a given model name */\nfunction getModelCtx(modelName: string): number {\n const bare = modelName.includes('/') ? 
modelName.split('/').slice(1).join('/') : modelName;\n if (CLOUD_MODEL_CTX[bare]) return CLOUD_MODEL_CTX[bare];\n\n // Heuristic: modern cloud models typically have 128K+ context\n if (bare.endsWith(':cloud') || bare.endsWith('-cloud')) return 131072;\n\n // Heuristic: large local models (30B+) often support 32K-64K\n const sizeMatch = bare.match(/(\\d+)b/i);\n if (sizeMatch) {\n const size = parseInt(sizeMatch[1], 10);\n if (size >= 70) return 65536;\n if (size >= 30) return 32768;\n if (size >= 14) return 16384;\n }\n\n // Conservative fallback for tiny unknown local models\n return 8192;\n}\n\n/** Max system prompt length for cloud models with tool calling.\n * Cloud models have 128K+ context — keep this high enough to always include\n * the full descriptions of any tools actively being used in the current task.\n */\nconst CLOUD_MAX_SYSTEM_PROMPT = 8000;\n\n/** Compress a system prompt for cloud models with tool calling.\n * Preserves (in priority order):\n * 1. Tool Execution rules (ReAct loop, MUST/NEVER — highest priority)\n * 2. Active tool descriptions (tools currently in use — must not be stripped)\n * 3. Identity\n * 4. Brief capabilities + behavior reminder\n *\n * @param content The full system prompt to compress\n * @param activeTools Descriptions of tools actively in use — always preserved\n */\nfunction compressSystemPrompt(content: string, activeTools?: Array<{ name: string; description: string }>): string {\n if (content.length <= CLOUD_MAX_SYSTEM_PROMPT) return content;\n\n const sections: string[] = [];\n\n // 1. Tool Execution rules — always first, always preserved\n const toolExecMatch = content.match(/## Tool Execution — HIGHEST PRIORITY[\\s\\S]*?(?=\\n## CRITICAL)/);\n if (toolExecMatch) {\n sections.push(toolExecMatch[0].trim());\n } else {\n sections.push(`## Tool Execution — HIGHEST PRIORITY\nYou are an AI agent. 
Your PRIMARY function is to execute tasks using tools.\n\nReAct Loop: THINK → ACT (call tool) → OBSERVE (read result) → REPEAT until done.\n\nMUST: call web_search+web_fetch for factual questions, call write_file/edit_file to save files (NEVER output file content as text), call shell for commands, call tool_search if unsure which tool to use.\nNEVER: describe what you could do, output file content inline, generate current facts from memory, tell user to visit a URL.\n\nRight: asked to write a file → call write_file immediately.\nWrong: asked to write a file → output the content as text in your reply.`);\n }\n\n // 2. Identity (shortened)\n const identityMatch = content.match(/## CRITICAL: Your Identity[\\s\\S]*?(?=\\n## )/);\n if (identityMatch) sections.push(identityMatch[0].trim());\n\n // 3. Brief capabilities + behavior\n sections.push('## Tools Available\\nShell, file read/write/edit, web search/fetch, browser, memory, weather, code execution, gmail, gdrive, gcal_personal, gtasks, gcontacts. Use tool_search to discover any tool not listed here.');\n sections.push('## Behavior\\n- Lead with action — call tools immediately, explain briefly after\\n- Never re-plan mid-task after CONFIRM — execute directly\\n- Confirm before destructive operations');\n\n // 4. Active tool descriptions — only inject if budget allows (max 2000 chars for tools).\n // This prevents the model from forgetting available actions mid-task (e.g. after CONFIRM).\n if (activeTools && activeTools.length > 0) {\n const TOOL_BUDGET = 2000;\n const toolLines: string[] = [];\n let toolChars = 0;\n for (const t of activeTools) {\n // Use first 150 chars of description to keep it compact\n const desc = t.description.length > 150 ? t.description.slice(0, 147) + '...' 
: t.description;\n const line = `- **${t.name}**: ${desc}`;\n if (toolChars + line.length > TOOL_BUDGET) break;\n toolLines.push(line);\n toolChars += line.length;\n }\n if (toolLines.length > 0) {\n sections.push(`## Active Tools\\n${toolLines.join('\\n')}`);\n }\n }\n\n const compressed = sections.join('\\n\\n');\n // Safety: never return something larger than the original\n if (compressed.length >= content.length) {\n logger.info(COMPONENT, `Compressed prompt would be larger (${compressed.length} vs ${content.length}), using truncated original`);\n return content.slice(0, CLOUD_MAX_SYSTEM_PROMPT);\n }\n logger.info(COMPONENT, `Compressed system prompt for cloud model: ${content.length} → ${compressed.length} chars`);\n return compressed;\n}\n\n/**\n * Trim messages for cloud models while preserving tool call/response pairs.\n * Naive slicing can split a tool call from its response, breaking the tool calling contract.\n * This walks backwards keeping assistant+tool pairs together.\n */\nfunction trimPreservingToolPairs(msgs: Array<Record<string, unknown>>, maxTotal: number): Array<Record<string, unknown>> {\n const systemMsgs = msgs.filter(m => m.role === 'system');\n const nonSystem = msgs.filter(m => m.role !== 'system');\n const maxNonSystem = maxTotal - systemMsgs.length;\n\n if (nonSystem.length <= maxNonSystem) return msgs;\n\n // Walk backwards, keeping tool/assistant pairs together\n const kept: Array<Record<string, unknown>> = [];\n let i = nonSystem.length - 1;\n while (i >= 0 && kept.length < maxNonSystem) {\n const msg = nonSystem[i];\n if (msg.role === 'tool') {\n // Keep this tool result and find its assistant parent\n kept.unshift(msg);\n for (let j = i - 1; j >= 0; j--) {\n if (nonSystem[j].role === 'assistant' && (nonSystem[j].tool_calls || nonSystem[j].toolCalls)) {\n kept.unshift(nonSystem[j]);\n i = j - 1;\n break;\n }\n if (nonSystem[j].role === 'tool') {\n // Sibling tool result from same batch\n kept.unshift(nonSystem[j]);\n } else {\n i = 
j;\n break;\n }\n }\n } else {\n kept.unshift(msg);\n i--;\n }\n }\n\n return [...systemMsgs, ...kept];\n}\n\n/** Simplify tool parameter schemas for cloud models.\n * Strips Zod artifacts ($schema, additionalProperties, etc.) that can\n * confuse cloud model tool-calling.\n */\nfunction simplifySchema(schema: Record<string, unknown> | undefined): Record<string, unknown> {\n if (!schema) return { type: 'object', properties: {} };\n const clean: Record<string, unknown> = { type: schema.type || 'object' };\n if (schema.properties) {\n const props: Record<string, unknown> = {};\n for (const [key, val] of Object.entries(schema.properties as Record<string, Record<string, unknown>>)) {\n // Flatten each property to just type + description\n const prop: Record<string, unknown> = { type: val.type || 'string' };\n if (val.description) prop.description = val.description;\n if (val.enum) prop.enum = val.enum;\n if (val.default !== undefined) prop.default = val.default;\n props[key] = prop;\n }\n clean.properties = props;\n }\n if (schema.required) clean.required = schema.required;\n return clean;\n}\n\nexport class OllamaProvider extends LLMProvider {\n readonly name = 'ollama';\n readonly displayName = 'Ollama (Local)';\n\n private get baseUrl(): string {\n const config = loadConfig();\n return config.providers.ollama.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';\n }\n\n async chat(options: ChatOptions): Promise<ChatResponse> {\n const model = (options.model || 'llama3.1').replace('ollama/', '');\n logger.debug(COMPONENT, `Chat request: model=${model}, messages=${options.messages.length}`);\n\n // Cloud models (Ollama Pro): detect via -cloud suffix or :cloud tag\n const isCloudModel = model.includes('-cloud') || model.includes(':cloud');\n const hasTools = options.tools && options.tools.length > 0;\n const hasToolRoleMessages = options.messages.some(m => m.role === 'tool');\n\n const body: Record<string, unknown> = {\n model,\n messages: 
options.messages.map((m) => {\n const msg: Record<string, unknown> = { role: m.role };\n // Compress system prompts for cloud models with tools to improve tool-calling compliance.\n // Pass descriptions of complex tools (>200 chars) so compression always preserves them —\n // prevents the model from forgetting available actions mid-task (e.g. after CONFIRM).\n if (m.role === 'system' && isCloudModel && hasTools) {\n const activeToolDescs = (options.tools ?? [])\n .filter(t => (t.function.description?.length ?? 0) > 200)\n .map(t => ({ name: t.function.name, description: t.function.description ?? '' }));\n msg.content = compressSystemPrompt(m.content, activeToolDescs.length > 0 ? activeToolDescs : undefined);\n } else {\n msg.content = m.content;\n }\n if (m.toolCalls && m.toolCalls.length > 0) {\n msg.tool_calls = m.toolCalls.map(tc => {\n let parsedArgs: Record<string, unknown> = {};\n try {\n parsedArgs = JSON.parse(tc.function.arguments || '{}');\n } catch {\n logger.warn(COMPONENT, `Malformed tool arguments for ${tc.function.name}, using empty args`);\n }\n // v4.13: Gemini's Ollama-compat adapter rejects\n // function_call.name === ''. Some models emit empty\n // names for tool_calls when the call is malformed;\n // stamp a placeholder so the whole turn isn't\n // rejected with HTTP 400 \"Name cannot be empty\".\n const fnName = (tc.function.name || '').trim() || 'unknown_tool';\n const out: Record<string, unknown> = {\n id: tc.id,\n type: tc.type || 'function',\n function: {\n name: fnName,\n arguments: parsedArgs,\n },\n };\n // v4.13: relay Gemini thought_signature through the\n // round-trip. 
Ollama's Gemini proxy needs it on every\n // subsequent functionCall part or rejects with\n // \"Function call is missing a thought_signature\".\n if (tc.thoughtSignature) {\n (out.function as Record<string, unknown>).thought_signature = tc.thoughtSignature;\n out.thought_signature = tc.thoughtSignature;\n }\n return out;\n });\n }\n if (m.toolCallId) msg.tool_call_id = m.toolCallId;\n // Cloud models (Gemini API) require function_response.name to be non-empty.\n // Guarantee a non-empty name on every tool-role message.\n if (m.role === 'tool') {\n const toolName = (m.name || '').trim() || 'tool';\n msg.name = toolName;\n } else if (m.name) {\n msg.name = m.name;\n }\n return msg;\n }),\n stream: false,\n keep_alive: '30m',\n options: {\n // Auto-configure context window per model's known maximum.\n // getModelCtx() returns the correct num_ctx for each cloud/local model.\n // v4.10.0-local (cost cap): capped cloud num_predict to 8K\n // (was 32K). OpenRouter's paid models reject requests whose\n // max_tokens exceeds the remaining credit, even though most\n // responses don't come close to that. 8K is plenty for any\n // single turn and keeps us from getting HTTP 402s when\n // credit runs low.\n num_predict: clampMaxTokens(options.model || 'ollama/llama3.1', options.maxTokens),\n num_ctx: getModelCtx(model),\n temperature: options.temperature ?? 
0.7,\n },\n };\n\n // Model capabilities — adapts behavior per model family\n const caps = getModelCapabilities(model);\n\n // Thinking mode: explicitly control per model capabilities.\n // Models that don't benefit from thinking (thinkingWithTools=false) get it disabled\n // to prevent content being routed to the thinking field instead of content field.\n // This is critical for models like minimax-m2.7:cloud which put ALL output in\n // the thinking field when think is unset, leaving content empty.\n if (options.thinking === false) {\n body.think = false;\n } else if (!caps.thinkingWithTools) {\n // Model doesn't support thinking — disable it to prevent 400 errors\n // from Ollama (e.g. \"titan-qwen3.5:4b does not support thinking\").\n body.think = false;\n } else if (options.thinking === true) {\n body.think = true;\n }\n // Otherwise: omit body.think — let the model decide\n\n // Per-turn override: when the conversation contains tool-role messages,\n // force think=false regardless of caller intent. The GLM-family\n // tool-call parser on the server (vLLM #39611, confirmed by Z.ai docs\n // for GLM-5.1) silently drops tool results when enable_thinking=true,\n // breaking the multi-turn tool loop. Z.ai's own guidance: disable\n // thinking on tool-call turns. This keeps reasoning available for\n // planning turns while preventing the drop on execution turns.\n if (hasToolRoleMessages && body.think !== false) {\n const priorIntent = body.think === undefined ? 
'unset' : String(body.think);\n body.think = false;\n logger.info(COMPONENT, `[ToolTurnThinkOverride] Forcing think=false for ${model} (tool-role message present, caller intent=${priorIntent})`);\n }\n\n if (options.tools && options.tools.length > 0) {\n body.tools = options.tools.map((t) => ({\n type: 'function',\n function: {\n name: t.function.name,\n description: t.function.description,\n parameters: simplifySchema(t.function.parameters),\n },\n }));\n // Per-model optimal sampling for tool calling\n (body.options as Record<string, unknown>).temperature = options.temperature ?? caps.toolTemperature ?? 0.5;\n if (caps.toolTopP) (body.options as Record<string, unknown>).top_p = caps.toolTopP;\n if (caps.toolTopK) (body.options as Record<string, unknown>).top_k = caps.toolTopK;\n\n // Force a tool call on the first round when the task requires it\n // Models that self-select tools well don't need forcing — it hurts them\n if (options.forceToolUse && !caps.selfSelectsTools) {\n body.tool_choice = 'required';\n logger.info(COMPONENT, `[ToolChoiceRequired] Setting tool_choice=required for ${model} (forceToolUse=true, selfSelectsTools=false)`);\n } else if (options.forceToolUse && caps.selfSelectsTools) {\n logger.info(COMPONENT, `[ToolChoiceSkipped] forceToolUse=true but selfSelectsTools=true for ${model} — NOT setting tool_choice`);\n }\n }\n\n // Ollama-native structured outputs — constrain generation to a JSON schema.\n // https://docs.ollama.com/capabilities/structured-outputs.md\n if (options.format !== undefined) {\n body.format = options.format;\n }\n\n // Cloud models: trim conversation history preserving tool call/response pairs.\n // With 131K context window, cloud models can handle much longer histories.\n // E1: Use >= 80 with margin (trim to 75) to prevent off-by-one at exact boundary.\n if (isCloudModel && hasTools) {\n const msgs = body.messages as Array<Record<string, unknown>>;\n if (msgs.length >= 80) {\n const trimmed = 
trimPreservingToolPairs(msgs, 80);\n logger.info(COMPONENT, `Cloud model context trim: ${msgs.length} → ${trimmed.length} messages`);\n body.messages = trimmed;\n }\n }\n\n // Some models ignore standalone system messages during tool calling.\n // Only merge when the model's capability profile says it needs it.\n if (hasTools && caps.needsSystemMerge) {\n const msgs = body.messages as Array<Record<string, unknown>>;\n const sysIdx = msgs.findIndex(m => m.role === 'system');\n const firstUserIdx = msgs.findIndex(m => m.role === 'user');\n if (sysIdx >= 0 && firstUserIdx >= 0 && msgs[sysIdx].content) {\n const sysContent = msgs[sysIdx].content as string;\n msgs[firstUserIdx].content = `[System Instructions]\\n${sysContent}\\n\\n[User Message]\\n${msgs[firstUserIdx].content}`;\n msgs.splice(sysIdx, 1); // Remove the standalone system message\n logger.info(COMPONENT, `Merged system prompt into first user message for cloud model compatibility`);\n }\n }\n\n const sentMessages = body.messages as Array<{role: string; content: string}>;\n const toolNames = body.tools ? (body.tools as Array<{function: {name: string}}>).map(t => t.function.name) : [];\n logger.info(COMPONENT, `Chat request: model=${model}, cloud=${isCloudModel}, tools=[${toolNames.join(',')}], think=${body.think}, messages=${sentMessages.length}`);\n \n if (process.env.DUMP_OLLAMA_BODY === '1' || model.includes('gemini')) {\n logger.error(COMPONENT, `[DUMP_BODY] Dumping failing request body for ${model} to /tmp/ollama-body-dump.json`);\n try {\n fs.writeFileSync('/tmp/ollama-body-dump.json', JSON.stringify(body, null, 2));\n } catch (e) {\n logger.error(COMPONENT, `Failed to dump body: ${e}`);\n }\n }\n\n // Cloud models routed through Ollama need longer timeouts (they proxy to remote APIs)\n const timeoutMs = isCloudModel ? 
300_000 : 120_000; // 5min cloud, 2min local\n let response = await fetchWithRetry(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n }, { timeoutMs });\n\n if (!response.ok) {\n const errorText = await response.text();\n // Fallback: if model doesn't support native tool calling or tokenization\n // fails with tools, retry without tools. Covers Gemini proxy errors like\n // \"does not support tools\" and \"tokenization\" failures on malformed schemas.\n if (response.status === 400 && body.tools && (\n errorText.includes('does not support tools') ||\n errorText.includes('tokenization') ||\n errorText.includes('tokenize') ||\n errorText.includes('Invalid JSON')\n )) {\n logger.warn(COMPONENT, `Model ${model} does not support native tool calling — running in chat-only mode`);\n delete body.tools;\n response = await fetchWithRetry(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n }, { timeoutMs });\n if (!response.ok) {\n const retryText = await response.text();\n // Hunt Finding #37 (2026-04-14): use createProviderError to\n // attach status + parsed Retry-After so the router actually\n // respects the provider's backoff hint.\n const { createProviderError } = await import('./errorTaxonomy.js');\n throw createProviderError('Ollama', response, retryText, { provider: 'ollama', model });\n }\n } else {\n const { createProviderError } = await import('./errorTaxonomy.js');\n throw createProviderError('Ollama', response, errorText, { provider: 'ollama', model });\n }\n }\n\n // v4.13 ancestor-extraction (Hermes rate_limit_tracker): capture any\n // x-ratelimit-* headers the Ollama proxy exposes. Graceful no-op when\n // the headers aren't present. 
Provider name is 'ollama' so the router's\n // proactive-backoff logic can consult per-provider state.\n try {\n const { recordHeaders } = await import('./rateLimitTracker.js');\n recordHeaders('ollama', response.headers);\n } catch { /* never fail the chat on tracker issues */ }\n\n const data = await response.json() as Record<string, unknown>;\n const message = data.message as Record<string, unknown>;\n logger.info(COMPONENT, `Response from ${model}: tool_calls=${JSON.stringify(message.tool_calls)}, content_length=${((message.content as string) || '').length}`);\n const toolCalls: ToolCall[] = [];\n\n // v5.0.2: Only accept tool_calls from the model if tools were actually\n // sent in the request. Prevents hallucinated tool calls when the safety\n // system has stripped all tools (activeTools = []) or for models that\n // emit tool_calls even without tool definitions.\n if (message.tool_calls && options.tools && options.tools.length > 0) {\n for (const tc of message.tool_calls as Array<Record<string, unknown>>) {\n const fn = tc.function as Record<string, unknown>;\n // v4.13: capture Gemini thought_signature if present — needed\n // on the round-trip back or Gemini rejects the next request.\n const thoughtSig = (tc.thought_signature as string | undefined) ??\n (tc.thoughtSignature as string | undefined) ??\n (fn.thought_signature as string | undefined) ??\n (fn.thoughtSignature as string | undefined);\n toolCalls.push({\n id: uuid(),\n type: 'function',\n function: {\n name: fn.name as string,\n arguments: JSON.stringify(fn.arguments),\n },\n ...(thoughtSig ? 
{ thoughtSignature: thoughtSig } : {}),\n });\n }\n }\n\n // A2: Hallucinated tool name detection at provider level (LangGraph pattern)\n if (options.tools && toolCalls.length > 0) {\n const validNames = new Set(options.tools.map(t => t.function.name));\n const invalid = toolCalls.filter(tc => !validNames.has(tc.function.name));\n if (invalid.length > 0) {\n logger.warn(COMPONENT, `[HallucinationGuard] Model hallucinated ${invalid.length} tool name(s): ${invalid.map(tc => tc.function.name).join(', ')}. Will be caught by toolRunner with corrective feedback.`);\n }\n }\n\n // If content is empty but thinking field has content, use it as a fallback.\n // This handles models that route output to thinking field when think is\n // unset or misconfigured. The router's stripThinkingFromResponse() will\n // clean up any reasoning that leaks through, so we can be permissive here.\n let content = (message.content as string) || '';\n if (!content && message.thinking) {\n const thinking = (message.thinking as string) || '';\n if (thinking.length > 0) {\n logger.info(COMPONENT, `[ThinkingFallback] Content empty, using thinking field (${thinking.length} chars)`);\n content = thinking;\n }\n }\n // Strip leaked thinking tags from Qwen/DeepSeek models\n content = content.replace(/^[\\s\\S]*?<\\/think>\\s*/m, '').trim();\n\n return {\n id: uuid(),\n content,\n toolCalls: toolCalls.length > 0 ? toolCalls : undefined,\n usage: {\n promptTokens: (data.prompt_eval_count as number) || 0,\n completionTokens: (data.eval_count as number) || 0,\n totalTokens: ((data.prompt_eval_count as number) || 0) + ((data.eval_count as number) || 0),\n },\n finishReason: toolCalls.length > 0 ? 
'tool_calls' : 'stop',\n model: `ollama/${model}`,\n };\n }\n\n async *chatStream(options: ChatOptions): AsyncGenerator<ChatStreamChunk> {\n const model = (options.model || 'llama3.1').replace('ollama/', '');\n\n // Cloud models (Ollama Pro): detect via -cloud suffix or :cloud tag\n const isCloudModel = model.includes('-cloud') || model.includes(':cloud');\n const hasTools = options.tools && options.tools.length > 0;\n const hasToolRoleMessages = options.messages.some(m => m.role === 'tool');\n\n const body: Record<string, unknown> = {\n model,\n messages: options.messages.map((m) => {\n const msg: Record<string, unknown> = { role: m.role };\n // Compress system prompts for cloud models with tools — preserve active tool descriptions\n if (m.role === 'system' && isCloudModel && hasTools) {\n const activeToolDescs = (options.tools ?? [])\n .filter(t => (t.function.description?.length ?? 0) > 200)\n .map(t => ({ name: t.function.name, description: t.function.description ?? '' }));\n msg.content = compressSystemPrompt(m.content, activeToolDescs.length > 0 ? activeToolDescs : undefined);\n } else {\n msg.content = m.content;\n }\n if (m.toolCalls && m.toolCalls.length > 0) {\n msg.tool_calls = m.toolCalls.map(tc => ({\n id: tc.id,\n type: tc.type || 'function',\n function: { name: tc.function.name, arguments: JSON.parse(tc.function.arguments || '{}') }\n }));\n }\n if (m.toolCallId) msg.tool_call_id = m.toolCallId;\n // Cloud models (Gemini API) require function_response.name to be non-empty\n if (m.role === 'tool') {\n msg.name = m.name || 'tool';\n } else if (m.name) {\n msg.name = m.name;\n }\n return msg;\n }),\n stream: true,\n keep_alive: '30m',\n options: {\n // v4.10.0-local (cost cap): 8K cloud cap matches non-stream path\n num_predict: clampMaxTokens(options.model || 'ollama/llama3.1', options.maxTokens),\n num_ctx: getModelCtx(model),\n temperature: options.temperature ?? 
0.7,\n },\n };\n\n // Model capabilities — adapts behavior per model family\n const caps = getModelCapabilities(model);\n\n // Thinking mode: respect explicit setting, otherwise use model capabilities.\n // Disable for models that don't support thinking — prevents 400 errors.\n if (options.thinking === false) {\n body.think = false;\n } else if (!caps.thinkingWithTools) {\n body.think = false;\n } else if (options.thinking === true) {\n body.think = true;\n }\n\n // Per-turn override for tool-role turns (see chat() for rationale: vLLM #39611 / Z.ai docs).\n if (hasToolRoleMessages && body.think !== false) {\n const priorIntent = body.think === undefined ? 'unset' : String(body.think);\n body.think = false;\n logger.info(COMPONENT, `[ToolTurnThinkOverride] (stream) Forcing think=false for ${model} (tool-role message present, caller intent=${priorIntent})`);\n }\n\n if (hasTools) {\n body.tools = options.tools!.map((t) => ({\n type: 'function',\n function: {\n name: t.function.name,\n description: t.function.description,\n parameters: simplifySchema(t.function.parameters),\n },\n }));\n // Per-model optimal sampling for tool calling\n (body.options as Record<string, unknown>).temperature = options.temperature ?? caps.toolTemperature ?? 
0.5;\n if (caps.toolTopP) (body.options as Record<string, unknown>).top_p = caps.toolTopP;\n if (caps.toolTopK) (body.options as Record<string, unknown>).top_k = caps.toolTopK;\n\n // Force tool_choice when requested — skip for models that self-select well\n if (options.forceToolUse && !caps.selfSelectsTools) {\n body.tool_choice = 'required';\n }\n }\n\n // Ollama-native structured outputs (stream variant).\n if (options.format !== undefined) {\n body.format = options.format;\n }\n\n // Optimize: trim history preserving tool pairs (cloud models only — local models have smaller contexts)\n if (isCloudModel && hasTools) {\n const msgs = body.messages as Array<Record<string, unknown>>;\n if (msgs.length > 80) {\n const trimmed = trimPreservingToolPairs(msgs, 80);\n logger.info(COMPONENT, `[Stream] Cloud model context trim: ${msgs.length} → ${trimmed.length} messages`);\n body.messages = trimmed;\n }\n }\n // Merge system into first user message only for models that need it\n if (hasTools && caps.needsSystemMerge) {\n const msgs2 = body.messages as Array<Record<string, unknown>>;\n const sysIdx = msgs2.findIndex(m => m.role === 'system');\n const firstUserIdx = msgs2.findIndex(m => m.role === 'user');\n if (sysIdx >= 0 && firstUserIdx >= 0 && msgs2[sysIdx].content) {\n const sysContent = msgs2[sysIdx].content as string;\n msgs2[firstUserIdx].content = `[System Instructions]\\n${sysContent}\\n\\n[User Message]\\n${msgs2[firstUserIdx].content}`;\n msgs2.splice(sysIdx, 1);\n }\n }\n\n try {\n // Cloud models need longer timeouts for streaming too\n const streamTimeoutMs = isCloudModel ? 
300_000 : 120_000;\n let response = await fetch(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(streamTimeoutMs),\n });\n\n if (!response.ok || !response.body) {\n const errorText = await response.text();\n // Fallback: if model doesn't support native tool calling, retry without tools\n if (response.status === 400 && errorText.includes('does not support tools') && body.tools) {\n logger.warn(COMPONENT, `Model ${model} does not support native tool calling — running in chat-only mode`);\n delete body.tools;\n response = await fetch(`${this.baseUrl}/api/chat`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(streamTimeoutMs),\n });\n if (!response.ok || !response.body) {\n const retryText = await response.text();\n yield { type: 'error', error: `Ollama error (${response.status}): ${retryText}` };\n return;\n }\n } else {\n yield { type: 'error', error: `Ollama error (${response.status}): ${errorText}` };\n return;\n }\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n let insideThink = false;\n\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n buffer += decoder.decode(value, { stream: true });\n\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (!line.trim()) continue;\n try {\n const chunk = JSON.parse(line);\n // Handle thinking field for models that put content there\n // Some models (e.g. 
qwen3.5, nemotron-super:cloud) use the thinking field\n // even when think=false is set — treat thinking as content in that case\n if (!chunk.message?.content && chunk.message?.thinking) {\n if (body.think === false) {\n chunk.message.content = chunk.message.thinking;\n }\n }\n if (chunk.message?.content) {\n let text = chunk.message.content;\n // Strip leaked <think>...</think> blocks from Qwen/DeepSeek\n if (text.includes('<think>')) insideThink = true;\n if (insideThink) {\n if (text.includes('</think>')) {\n text = text.split('</think>').pop()?.trim() || '';\n insideThink = false;\n } else {\n continue; // suppress thinking content\n }\n }\n if (text) yield { type: 'text', content: text };\n }\n // v5.0.2: Only yield tool_calls if tools were sent in the request\n if (chunk.message?.tool_calls && options.tools && options.tools.length > 0) {\n for (const tc of chunk.message.tool_calls) {\n const fn = tc.function as Record<string, unknown>;\n yield {\n type: 'tool_call',\n toolCall: { id: uuid(), type: 'function', function: { name: fn.name as string, arguments: JSON.stringify(fn.arguments) } },\n };\n }\n }\n if (chunk.done) break;\n } catch { /* skip malformed NDJSON lines */ }\n }\n }\n yield { type: 'done' };\n } catch (error) {\n yield { type: 'error', error: (error as Error).message };\n }\n }\n\n async listModels(): Promise<string[]> {\n try {\n const response = await fetch(`${this.baseUrl}/api/tags`);\n if (!response.ok) {\n // Hunt Finding #29 (2026-04-14): consume the body even on\n // error paths so the underlying socket can return to the\n // keep-alive pool. 
Without this, every non-200 response\n // leaks its socket until the GC gets around to it.\n await response.body?.cancel().catch(() => {});\n return [];\n }\n const data = await response.json() as { models?: Array<{ name: string }> };\n return (data.models || []).map((m) => m.name);\n } catch {\n return [];\n }\n }\n\n async healthCheck(): Promise<boolean> {\n try {\n const response = await fetch(`${this.baseUrl}/api/tags`);\n const ok = response.ok;\n // Hunt Finding #29 (2026-04-14): ALWAYS consume or cancel the\n // body. Previously we returned response.ok directly, leaving the\n // body stream dangling and the socket held open.\n await response.body?.cancel().catch(() => {});\n return ok;\n } catch {\n return false;\n }\n }\n}\n"],"mappings":";AAGA;AAAA,EACI;AAAA,OAKG;AACP,SAAS,kBAAkB;AAC3B,OAAO,YAAY;AACnB,SAAS,sBAAsB;AAC/B,SAAS,MAAM,YAAY;AAC3B,YAAY,QAAQ;AACpB,SAAS,sBAAsB;AAE/B,MAAM,YAAY;AAOlB,MAAM,kBAA0C;AAAA;AAAA,EAE5C,iBAAiB;AAAA;AAAA,EAEjB,eAAe;AAAA;AAAA,EAEf,mBAAmB;AAAA;AAAA,EAEnB,mBAAmB;AAAA;AAAA,EAEnB,0BAA0B;AAAA;AAAA,EAE1B,sBAAsB;AAAA;AAAA,EAEtB,4BAA4B;AAAA;AAAA,EAE5B,4BAA4B;AAAA,EAC5B,uBAAuB;AAAA;AAAA,EAEvB,oBAAoB;AAAA;AAAA,EAEpB,oBAAoB;AAAA,EACpB,2BAA2B;AAAA;AAAA,EAE3B,mBAAmB;AAAA,EACnB,0BAA0B;AAAA,EAC1B,sBAAsB;AAAA,EACtB,uBAAuB;AAAA;AAAA,EAEvB,0BAA0B;AAAA;AAAA,EAE1B,iCAAiC;AAAA;AAAA,EAEjC,sBAAsB;AAAA;AAAA,EAEtB,sBAAsB;AAAA;AAAA,EAEtB,gBAAgB;AAAA;AAAA,EAEhB,eAAe;AACnB;AAsBA,MAAM,uBAA0C;AAAA,EAC5C,kBAAkB;AAAA,EAClB,mBAAmB;AAAA,EACnB,kBAAkB;AAAA;AAAA,EAClB,iBAAiB;AAAA,EACjB,UAAU;AAAA,EACV,UAAU;AACd;AAMA,SAAS,0BAA0B,WAA2D;AAC1F,QAAM,QAAQ,UAAU,YAAY;AAGpC,MAAI,MAAM,SAAS,QAAQ,KAAK,MAAM,SAAS,QAAQ,GAAG;AACtD,WAAO,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EAC5G;AAGA,QAAM,YAAY,MAAM,MAAM,QAAQ;AACtC,MAAI,WAAW;AACX,UAAM,OAAO,SAAS,UAAU,CAAC,GAAG,EAAE;AACtC,QAAI,QAAQ,IAAI;AACZ,aAAO,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,IAC7G;AAAA,EACJ;AAGA,QAAM,kBAAkB,CAAC,QAAQ,OAAO,YAAY,QAAQ,SAAS,YAAY,YAAY,UAAU,iBAAiB,YAAY,UAAU,QAAQ,gBAAgB;AACtK,a
AAW,UAAU,iBAAiB;AAClC,QAAI,MAAM,SAAS,MAAM,GAAG;AACxB,aAAO,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,IAC7G;AAAA,EACJ;AAGA,SAAO;AACX;AAEA,MAAM,qBAAiE;AAAA;AAAA,EAEnE,WAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,SAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,oBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA,EAGrH,eAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,iBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,iBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASrH,gBAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,KAAK,UAAU,MAAM,UAAU,GAAG;AAAA,EACpJ,cAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,KAAK,UAAU,KAAK;AAAA;AAAA,EAGtI,UAAoB,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,GAAK,UAAU,MAAM,UAAU,GAAG;AAAA,EACpJ,WAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,WAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,SAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACtH,WAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,oBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACpH,mBAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,aAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACrH,aAAoB,EAAE,kBAAkB,MAAM,mBAAmB,MAAM,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA,EAGrH,cAAoB,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA,EACtH,oBAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA;AAAA,EAGtH,kBAAoB,EAAE,kBAAkB,MAAM,mBAAmB,OAAO,kBAAkB,OAAO,iBAAiB,IAAI;AAAA;AAAA,EAGtH,YAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACtH,YAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAAA,EACtH,WAAoB,EAAE,kBAAkB,OAAO,mBAAmB,OAAO,kBAAkB,MAAM,iBAAiB,IAAI;AAC1H;AAUA,SAAS,qBAAqB,WAAsC;AAEhE,MAAI;AAEA,UAAM,EAAE,gBAAgB,aAAa,IAAI,QAAQ,kCAAkC;AACn
F,UAAM,QAAQ,eAAe,SAAS,KAAK,eAAe,UAAU,SAAS,EAAE;AAC/E,QAAI,SAAS,CAAC,aAAa,KAAK,GAAG;AAE/B,aAAO;AAAA,QACH,GAAG;AAAA,QACH,kBAAkB,MAAM;AAAA,QACxB,mBAAmB,MAAM,mBAAmB,CAAC,MAAM;AAAA,QACnD,kBAAkB,CAAC,MAAM;AAAA,QACzB,iBAAiB,MAAM,kBAAkB,MAAM;AAAA,QAC/C,UAAU;AAAA,QACV,UAAU;AAAA,MACd;AAAA,IACJ;AAAA,EACJ,QAAQ;AAAA,EAER;AAGJ,QAAM,gBAAgB,oBAAI,IAAY;AAItC,WAAS,uBAAuBA,YAAyB;AACrD,QAAI,cAAc,IAAIA,UAAS,EAAG;AAClC,kBAAc,IAAIA,UAAS;AAE3B,WAAO,wBAAwB,EAC1B,KAAK,CAAC,EAAE,WAAW,MAAM,WAAW,UAAUA,UAAS,EAAE,CAAC,EAC1D,KAAK,CAAC,WAAW,OAAO,kCAAkC,EACtD,KAAK,CAAC,EAAE,kBAAkB,MAAM;AAC7B,wBAAkB,MAAM;AACxB,aAAO,KAAK,WAAW,iCAAiCA,UAAS,iBAAiB,OAAO,eAAe,oBAAoB,OAAO,oBAAoB,EAAE;AAAA,IAC7J,CAAC,CAAC,EACL,MAAM,CAAC,QAAQ,OAAO,KAAK,WAAW,+BAA+BA,UAAS,KAAM,IAAc,OAAO,EAAE,CAAC,EAC5G,QAAQ,MAAM,cAAc,OAAOA,UAAS,CAAC;AAAA,EACtD;AAGI,QAAM,OAAO,UAAU,SAAS,GAAG,IAAI,UAAU,MAAM,GAAG,EAAE,MAAM,CAAC,EAAE,KAAK,GAAG,IAAI;AAEjF,QAAM,QAAQ,KAAK,QAAQ,mCAAmC,EAAE;AAIhE,QAAM,gBAAgB;AACtB,QAAM,WAAW,MAAM,QAAQ,eAAe,EAAE;AAEhD,MAAI;AACJ,MAAI,UAAU;AACd,aAAW,CAAC,SAAS,IAAI,KAAK,OAAO,QAAQ,kBAAkB,GAAG;AAC9D,QAAI,aAAa,WAAW,SAAS,WAAW,OAAO,GAAG;AACtD,UAAI,QAAQ,SAAS,SAAS;AAC1B,oBAAY;AACZ,kBAAU,QAAQ;AAAA,MACtB;AAAA,IACJ;AAAA,EACJ;AAEA,MAAI,CAAC,WAAW;AAEZ,UAAM,WAAW,0BAA0B,SAAS;AACpD,QAAI,UAAU;AACV,aAAO,KAAK,WAAW,UAAU,SAAS,8DAAyD,KAAK,UAAU,QAAQ,CAAC,EAAE;AAC7H,kBAAY;AAAA,IAChB,OAAO;AACH,aAAO,KAAK,WAAW,UAAU,SAAS,+GAA0G;AACpJ,6BAAuB,SAAS;AAAA,IACpC;AAAA,EACJ;AACA,SAAO,EAAE,GAAG,sBAAsB,GAAI,aAAa,CAAC,EAAG;AAC3D;AAGA,SAAS,YAAY,WAA2B;AAC5C,QAAM,OAAO,UAAU,SAAS,GAAG,IAAI,UAAU,MAAM,GAAG,EAAE,MAAM,CAAC,EAAE,KAAK,GAAG,IAAI;AACjF,MAAI,gBAAgB,IAAI,EAAG,QAAO,gBAAgB,IAAI;AAGtD,MAAI,KAAK,SAAS,QAAQ,KAAK,KAAK,SAAS,QAAQ,EAAG,QAAO;AAG/D,QAAM,YAAY,KAAK,MAAM,SAAS;AACtC,MAAI,WAAW;AACX,UAAM,OAAO,SAAS,UAAU,CAAC,GAAG,EAAE;AACtC,QAAI,QAAQ,GAAI,QAAO;AACvB,QAAI,QAAQ,GAAI,QAAO;AACvB,QAAI,QAAQ,GAAI,QAAO;AAAA,EAC3B;AAGA,SAAO;AACX;AAMA,MAAM,0BAA0B;AAYhC,SAAS,qBAAqB,SAAiB,aAAoE;AAC/G,MAAI,QAAQ,UAAU,wBAAyB,QAAO;AAEtD,QAAM,WAAqB,CAAC;AAG5B,QAAM,gBAAgB,QAAQ,MAAM,+DAA+D;AACnG,M
AAI,eAAe;AACf,aAAS,KAAK,cAAc,CAAC,EAAE,KAAK,CAAC;AAAA,EACzC,OAAO;AACH,aAAS,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,8EASmD;AAAA,EACrE;AAGA,QAAM,gBAAgB,QAAQ,MAAM,6CAA6C;AACjF,MAAI,cAAe,UAAS,KAAK,cAAc,CAAC,EAAE,KAAK,CAAC;AAGxD,WAAS,KAAK,qNAAqN;AACnO,WAAS,KAAK,+LAAqL;AAInM,MAAI,eAAe,YAAY,SAAS,GAAG;AACvC,UAAM,cAAc;AACpB,UAAM,YAAsB,CAAC;AAC7B,QAAI,YAAY;AAChB,eAAW,KAAK,aAAa;AAEzB,YAAM,OAAO,EAAE,YAAY,SAAS,MAAM,EAAE,YAAY,MAAM,GAAG,GAAG,IAAI,QAAQ,EAAE;AAClF,YAAM,OAAO,OAAO,EAAE,IAAI,OAAO,IAAI;AACrC,UAAI,YAAY,KAAK,SAAS,YAAa;AAC3C,gBAAU,KAAK,IAAI;AACnB,mBAAa,KAAK;AAAA,IACtB;AACA,QAAI,UAAU,SAAS,GAAG;AACtB,eAAS,KAAK;AAAA,EAAoB,UAAU,KAAK,IAAI,CAAC,EAAE;AAAA,IAC5D;AAAA,EACJ;AAEA,QAAM,aAAa,SAAS,KAAK,MAAM;AAEvC,MAAI,WAAW,UAAU,QAAQ,QAAQ;AACrC,WAAO,KAAK,WAAW,sCAAsC,WAAW,MAAM,OAAO,QAAQ,MAAM,6BAA6B;AAChI,WAAO,QAAQ,MAAM,GAAG,uBAAuB;AAAA,EACnD;AACA,SAAO,KAAK,WAAW,6CAA6C,QAAQ,MAAM,WAAM,WAAW,MAAM,QAAQ;AACjH,SAAO;AACX;AAOA,SAAS,wBAAwB,MAAsC,UAAkD;AACrH,QAAM,aAAa,KAAK,OAAO,OAAK,EAAE,SAAS,QAAQ;AACvD,QAAM,YAAY,KAAK,OAAO,OAAK,EAAE,SAAS,QAAQ;AACtD,QAAM,eAAe,WAAW,WAAW;AAE3C,MAAI,UAAU,UAAU,aAAc,QAAO;AAG7C,QAAM,OAAuC,CAAC;AAC9C,MAAI,IAAI,UAAU,SAAS;AAC3B,SAAO,KAAK,KAAK,KAAK,SAAS,cAAc;AACzC,UAAM,MAAM,UAAU,CAAC;AACvB,QAAI,IAAI,SAAS,QAAQ;AAErB,WAAK,QAAQ,GAAG;AAChB,eAAS,IAAI,IAAI,GAAG,KAAK,GAAG,KAAK;AAC7B,YAAI,UAAU,CAAC,EAAE,SAAS,gBAAgB,UAAU,CAAC,EAAE,cAAc,UAAU,CAAC,EAAE,YAAY;AAC1F,eAAK,QAAQ,UAAU,CAAC,CAAC;AACzB,cAAI,IAAI;AACR;AAAA,QACJ;AACA,YAAI,UAAU,CAAC,EAAE,SAAS,QAAQ;AAE9B,eAAK,QAAQ,UAAU,CAAC,CAAC;AAAA,QAC7B,OAAO;AACH,cAAI;AACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,OAAO;AACH,WAAK,QAAQ,GAAG;AAChB;AAAA,IACJ;AAAA,EACJ;AAEA,SAAO,CAAC,GAAG,YAAY,GAAG,IAAI;AAClC;AAMA,SAAS,eAAe,QAAsE;AAC1F,MAAI,CAAC,OAAQ,QAAO,EAAE,MAAM,UAAU,YAAY,CAAC,EAAE;AACrD,QAAM,QAAiC,EAAE,MAAM,OAAO,QAAQ,SAAS;AACvE,MAAI,OAAO,YAAY;AACnB,UAAM,QAAiC,CAAC;AACxC,eAAW,CAAC,KAAK,GAAG,KAAK,OAAO,QAAQ,OAAO,UAAqD,GAAG;AAEnG,YAAM,OAAgC,EAAE,MAAM,IAAI,QAAQ,SAAS;AACnE,UAAI,IAAI,YAAa,MAAK,cAAc,IAAI;AAC5C,UAAI,IAAI,KAAM,MAAK,OAAO,IAAI;AAC9B,UAAI,IAAI,YA
AY,OAAW,MAAK,UAAU,IAAI;AAClD,YAAM,GAAG,IAAI;AAAA,IACjB;AACA,UAAM,aAAa;AAAA,EACvB;AACA,MAAI,OAAO,SAAU,OAAM,WAAW,OAAO;AAC7C,SAAO;AACX;AAEO,MAAM,uBAAuB,YAAY;AAAA,EACnC,OAAO;AAAA,EACP,cAAc;AAAA,EAEvB,IAAY,UAAkB;AAC1B,UAAM,SAAS,WAAW;AAC1B,WAAO,OAAO,UAAU,OAAO,WAAW,QAAQ,IAAI,mBAAmB;AAAA,EAC7E;AAAA,EAEA,MAAM,KAAK,SAA6C;AACpD,UAAM,SAAS,QAAQ,SAAS,YAAY,QAAQ,WAAW,EAAE;AACjE,WAAO,MAAM,WAAW,uBAAuB,KAAK,cAAc,QAAQ,SAAS,MAAM,EAAE;AAG3F,UAAM,eAAe,MAAM,SAAS,QAAQ,KAAK,MAAM,SAAS,QAAQ;AACxE,UAAM,WAAW,QAAQ,SAAS,QAAQ,MAAM,SAAS;AACzD,UAAM,sBAAsB,QAAQ,SAAS,KAAK,OAAK,EAAE,SAAS,MAAM;AAExE,UAAM,OAAgC;AAAA,MAClC;AAAA,MACA,UAAU,QAAQ,SAAS,IAAI,CAAC,MAAM;AAClC,cAAM,MAA+B,EAAE,MAAM,EAAE,KAAK;AAIpD,YAAI,EAAE,SAAS,YAAY,gBAAgB,UAAU;AACjD,gBAAM,mBAAmB,QAAQ,SAAS,CAAC,GACtC,OAAO,QAAM,EAAE,SAAS,aAAa,UAAU,KAAK,GAAG,EACvD,IAAI,QAAM,EAAE,MAAM,EAAE,SAAS,MAAM,aAAa,EAAE,SAAS,eAAe,GAAG,EAAE;AACpF,cAAI,UAAU,qBAAqB,EAAE,SAAS,gBAAgB,SAAS,IAAI,kBAAkB,MAAS;AAAA,QAC1G,OAAO;AACH,cAAI,UAAU,EAAE;AAAA,QACpB;AACA,YAAI,EAAE,aAAa,EAAE,UAAU,SAAS,GAAG;AACvC,cAAI,aAAa,EAAE,UAAU,IAAI,QAAM;AACnC,gBAAI,aAAsC,CAAC;AAC3C,gBAAI;AACA,2BAAa,KAAK,MAAM,GAAG,SAAS,aAAa,IAAI;AAAA,YACzD,QAAQ;AACJ,qBAAO,KAAK,WAAW,gCAAgC,GAAG,SAAS,IAAI,oBAAoB;AAAA,YAC/F;AAMA,kBAAM,UAAU,GAAG,SAAS,QAAQ,IAAI,KAAK,KAAK;AAClD,kBAAM,MAA+B;AAAA,cACjC,IAAI,GAAG;AAAA,cACP,MAAM,GAAG,QAAQ;AAAA,cACjB,UAAU;AAAA,gBACN,MAAM;AAAA,gBACN,WAAW;AAAA,cACf;AAAA,YACJ;AAKA,gBAAI,GAAG,kBAAkB;AACrB,cAAC,IAAI,SAAqC,oBAAoB,GAAG;AACjE,kBAAI,oBAAoB,GAAG;AAAA,YAC/B;AACA,mBAAO;AAAA,UACX,CAAC;AAAA,QACL;AACA,YAAI,EAAE,WAAY,KAAI,eAAe,EAAE;AAGvC,YAAI,EAAE,SAAS,QAAQ;AACnB,gBAAM,YAAY,EAAE,QAAQ,IAAI,KAAK,KAAK;AAC1C,cAAI,OAAO;AAAA,QACf,WAAW,EAAE,MAAM;AACf,cAAI,OAAO,EAAE;AAAA,QACjB;AACA,eAAO;AAAA,MACX,CAAC;AAAA,MACD,QAAQ;AAAA,MACR,YAAY;AAAA,MACZ,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QASL,aAAa,eAAe,QAAQ,SAAS,mBAAmB,QAAQ,SAAS;AAAA,QACjF,SAAS,YAAY,KAAK;AAAA,QAC1B,aAAa,QAAQ,eAAe;AAAA,MACxC;AAAA,IACJ;AAGA,UAAM,OAAO,qBAAqB,KAAK;AAOvC,QAAI,QAAQ,aAAa,OAAO;AAC5B,WAAK,QAAQ;AAAA,IACjB,WAAW,CAAC,KAAK,
mBAAmB;AAGhC,WAAK,QAAQ;AAAA,IACjB,WAAW,QAAQ,aAAa,MAAM;AAClC,WAAK,QAAQ;AAAA,IACjB;AAUA,QAAI,uBAAuB,KAAK,UAAU,OAAO;AAC7C,YAAM,cAAc,KAAK,UAAU,SAAY,UAAU,OAAO,KAAK,KAAK;AAC1E,WAAK,QAAQ;AACb,aAAO,KAAK,WAAW,mDAAmD,KAAK,8CAA8C,WAAW,GAAG;AAAA,IAC/I;AAEA,QAAI,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AAC3C,WAAK,QAAQ,QAAQ,MAAM,IAAI,CAAC,OAAO;AAAA,QACnC,MAAM;AAAA,QACN,UAAU;AAAA,UACN,MAAM,EAAE,SAAS;AAAA,UACjB,aAAa,EAAE,SAAS;AAAA,UACxB,YAAY,eAAe,EAAE,SAAS,UAAU;AAAA,QACpD;AAAA,MACJ,EAAE;AAEF,MAAC,KAAK,QAAoC,cAAc,QAAQ,eAAe,KAAK,mBAAmB;AACvG,UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAC1E,UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAI1E,UAAI,QAAQ,gBAAgB,CAAC,KAAK,kBAAkB;AAChD,aAAK,cAAc;AACnB,eAAO,KAAK,WAAW,yDAAyD,KAAK,8CAA8C;AAAA,MACvI,WAAW,QAAQ,gBAAgB,KAAK,kBAAkB;AACtD,eAAO,KAAK,WAAW,uEAAuE,KAAK,iCAA4B;AAAA,MACnI;AAAA,IACJ;AAIA,QAAI,QAAQ,WAAW,QAAW;AAC9B,WAAK,SAAS,QAAQ;AAAA,IAC1B;AAKA,QAAI,gBAAgB,UAAU;AAC1B,YAAM,OAAO,KAAK;AAClB,UAAI,KAAK,UAAU,IAAI;AACnB,cAAM,UAAU,wBAAwB,MAAM,EAAE;AAChD,eAAO,KAAK,WAAW,6BAA6B,KAAK,MAAM,WAAM,QAAQ,MAAM,WAAW;AAC9F,aAAK,WAAW;AAAA,MACpB;AAAA,IACJ;AAIA,QAAI,YAAY,KAAK,kBAAkB;AACnC,YAAM,OAAO,KAAK;AAClB,YAAM,SAAS,KAAK,UAAU,OAAK,EAAE,SAAS,QAAQ;AACtD,YAAM,eAAe,KAAK,UAAU,OAAK,EAAE,SAAS,MAAM;AAC1D,UAAI,UAAU,KAAK,gBAAgB,KAAK,KAAK,MAAM,EAAE,SAAS;AAC1D,cAAM,aAAa,KAAK,MAAM,EAAE;AAChC,aAAK,YAAY,EAAE,UAAU;AAAA,EAA0B,UAAU;AAAA;AAAA;AAAA,EAAuB,KAAK,YAAY,EAAE,OAAO;AAClH,aAAK,OAAO,QAAQ,CAAC;AACrB,eAAO,KAAK,WAAW,4EAA4E;AAAA,MACvG;AAAA,IACJ;AAEA,UAAM,eAAe,KAAK;AAC1B,UAAM,YAAY,KAAK,QAAS,KAAK,MAA4C,IAAI,OAAK,EAAE,SAAS,IAAI,IAAI,CAAC;AAC9G,WAAO,KAAK,WAAW,uBAAuB,KAAK,WAAW,YAAY,YAAY,UAAU,KAAK,GAAG,CAAC,YAAY,KAAK,KAAK,cAAc,aAAa,MAAM,EAAE;AAElK,QAAI,QAAQ,IAAI,qBAAqB,OAAO,MAAM,SAAS,QAAQ,GAAG;AAClE,aAAO,MAAM,WAAW,gDAAgD,KAAK,gCAAgC;AAC7G,UAAI;AACA,WAAG,cAAc,8BAA8B,KAAK,UAAU,MAAM,MAAM,CAAC,CAAC;AAAA,MAChF,SAAS,GAAG;AACR,eAAO,MAAM,WAAW,wBAAwB,CAAC,EAAE;AAAA,MACvD;AAAA,IACJ;AAGA,UAAM,YAAY,eAAe,MAAU;AAC3C,QAAI,WAAW,MAAM,eAAe,GAAG,KAAK,OAAO,aAAa;AAAA,MAC5D,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9
C,MAAM,KAAK,UAAU,IAAI;AAAA,IAC7B,GAAG,EAAE,UAAU,CAAC;AAEhB,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK;AAItC,UAAI,SAAS,WAAW,OAAO,KAAK,UAChC,UAAU,SAAS,wBAAwB,KAC3C,UAAU,SAAS,cAAc,KACjC,UAAU,SAAS,UAAU,KAC7B,UAAU,SAAS,cAAc,IAClC;AACC,eAAO,KAAK,WAAW,SAAS,KAAK,wEAAmE;AACxG,eAAO,KAAK;AACZ,mBAAW,MAAM,eAAe,GAAG,KAAK,OAAO,aAAa;AAAA,UACxD,QAAQ;AAAA,UACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,UAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,QAC7B,GAAG,EAAE,UAAU,CAAC;AAChB,YAAI,CAAC,SAAS,IAAI;AACd,gBAAM,YAAY,MAAM,SAAS,KAAK;AAItC,gBAAM,EAAE,oBAAoB,IAAI,MAAM,OAAO,oBAAoB;AACjE,gBAAM,oBAAoB,UAAU,UAAU,WAAW,EAAE,UAAU,UAAU,MAAM,CAAC;AAAA,QAC1F;AAAA,MACJ,OAAO;AACH,cAAM,EAAE,oBAAoB,IAAI,MAAM,OAAO,oBAAoB;AACjE,cAAM,oBAAoB,UAAU,UAAU,WAAW,EAAE,UAAU,UAAU,MAAM,CAAC;AAAA,MAC1F;AAAA,IACJ;AAMA,QAAI;AACA,YAAM,EAAE,cAAc,IAAI,MAAM,OAAO,uBAAuB;AAC9D,oBAAc,UAAU,SAAS,OAAO;AAAA,IAC5C,QAAQ;AAAA,IAA8C;AAEtD,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,UAAM,UAAU,KAAK;AACrB,WAAO,KAAK,WAAW,iBAAiB,KAAK,gBAAgB,KAAK,UAAU,QAAQ,UAAU,CAAC,qBAAsB,QAAQ,WAAsB,IAAI,MAAM,EAAE;AAC/J,UAAM,YAAwB,CAAC;AAM/B,QAAI,QAAQ,cAAc,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AACjE,iBAAW,MAAM,QAAQ,YAA8C;AACnE,cAAM,KAAK,GAAG;AAGd,cAAM,aAAc,GAAG,qBAClB,GAAG,oBACH,GAAG,qBACH,GAAG;AACR,kBAAU,KAAK;AAAA,UACX,IAAI,KAAK;AAAA,UACT,MAAM;AAAA,UACN,UAAU;AAAA,YACN,MAAM,GAAG;AAAA,YACT,WAAW,KAAK,UAAU,GAAG,SAAS;AAAA,UAC1C;AAAA,UACA,GAAI,aAAa,EAAE,kBAAkB,WAAW,IAAI,CAAC;AAAA,QACzD,CAAC;AAAA,MACL;AAAA,IACJ;AAGA,QAAI,QAAQ,SAAS,UAAU,SAAS,GAAG;AACvC,YAAM,aAAa,IAAI,IAAI,QAAQ,MAAM,IAAI,OAAK,EAAE,SAAS,IAAI,CAAC;AAClE,YAAM,UAAU,UAAU,OAAO,QAAM,CAAC,WAAW,IAAI,GAAG,SAAS,IAAI,CAAC;AACxE,UAAI,QAAQ,SAAS,GAAG;AACpB,eAAO,KAAK,WAAW,2CAA2C,QAAQ,MAAM,kBAAkB,QAAQ,IAAI,QAAM,GAAG,SAAS,IAAI,EAAE,KAAK,IAAI,CAAC,0DAA0D;AAAA,MAC9M;AAAA,IACJ;AAMA,QAAI,UAAW,QAAQ,WAAsB;AAC7C,QAAI,CAAC,WAAW,QAAQ,UAAU;AAC9B,YAAM,WAAY,QAAQ,YAAuB;AACjD,UAAI,SAAS,SAAS,GAAG;AACrB,eAAO,KAAK,WAAW,2DAA2D,SAAS,MAAM,SAAS;AAC1G,kBAAU;AAAA,MACd;AAAA,IACJ;AAEA,cAAU,QAAQ,QAAQ,0BAA0B,EAAE,EAAE,KAAK;AAE7D,WAAO;AAAA,MACH,IAAI,KAAK;AAAA,MACT;AAAA,MACA,WAAW,UAAU,
SAAS,IAAI,YAAY;AAAA,MAC9C,OAAO;AAAA,QACH,cAAe,KAAK,qBAAgC;AAAA,QACpD,kBAAmB,KAAK,cAAyB;AAAA,QACjD,cAAe,KAAK,qBAAgC,MAAO,KAAK,cAAyB;AAAA,MAC7F;AAAA,MACA,cAAc,UAAU,SAAS,IAAI,eAAe;AAAA,MACpD,OAAO,UAAU,KAAK;AAAA,IAC1B;AAAA,EACJ;AAAA,EAEA,OAAO,WAAW,SAAuD;AACrE,UAAM,SAAS,QAAQ,SAAS,YAAY,QAAQ,WAAW,EAAE;AAGjE,UAAM,eAAe,MAAM,SAAS,QAAQ,KAAK,MAAM,SAAS,QAAQ;AACxE,UAAM,WAAW,QAAQ,SAAS,QAAQ,MAAM,SAAS;AACzD,UAAM,sBAAsB,QAAQ,SAAS,KAAK,OAAK,EAAE,SAAS,MAAM;AAExE,UAAM,OAAgC;AAAA,MAClC;AAAA,MACA,UAAU,QAAQ,SAAS,IAAI,CAAC,MAAM;AAClC,cAAM,MAA+B,EAAE,MAAM,EAAE,KAAK;AAEpD,YAAI,EAAE,SAAS,YAAY,gBAAgB,UAAU;AACjD,gBAAM,mBAAmB,QAAQ,SAAS,CAAC,GACtC,OAAO,QAAM,EAAE,SAAS,aAAa,UAAU,KAAK,GAAG,EACvD,IAAI,QAAM,EAAE,MAAM,EAAE,SAAS,MAAM,aAAa,EAAE,SAAS,eAAe,GAAG,EAAE;AACpF,cAAI,UAAU,qBAAqB,EAAE,SAAS,gBAAgB,SAAS,IAAI,kBAAkB,MAAS;AAAA,QAC1G,OAAO;AACH,cAAI,UAAU,EAAE;AAAA,QACpB;AACA,YAAI,EAAE,aAAa,EAAE,UAAU,SAAS,GAAG;AACvC,cAAI,aAAa,EAAE,UAAU,IAAI,SAAO;AAAA,YACpC,IAAI,GAAG;AAAA,YACP,MAAM,GAAG,QAAQ;AAAA,YACjB,UAAU,EAAE,MAAM,GAAG,SAAS,MAAM,WAAW,KAAK,MAAM,GAAG,SAAS,aAAa,IAAI,EAAE;AAAA,UAC7F,EAAE;AAAA,QACN;AACA,YAAI,EAAE,WAAY,KAAI,eAAe,EAAE;AAEvC,YAAI,EAAE,SAAS,QAAQ;AACnB,cAAI,OAAO,EAAE,QAAQ;AAAA,QACzB,WAAW,EAAE,MAAM;AACf,cAAI,OAAO,EAAE;AAAA,QACjB;AACA,eAAO;AAAA,MACX,CAAC;AAAA,MACD,QAAQ;AAAA,MACR,YAAY;AAAA,MACZ,SAAS;AAAA;AAAA,QAEL,aAAa,eAAe,QAAQ,SAAS,mBAAmB,QAAQ,SAAS;AAAA,QACjF,SAAS,YAAY,KAAK;AAAA,QAC1B,aAAa,QAAQ,eAAe;AAAA,MACxC;AAAA,IACJ;AAGA,UAAM,OAAO,qBAAqB,KAAK;AAIvC,QAAI,QAAQ,aAAa,OAAO;AAC5B,WAAK,QAAQ;AAAA,IACjB,WAAW,CAAC,KAAK,mBAAmB;AAChC,WAAK,QAAQ;AAAA,IACjB,WAAW,QAAQ,aAAa,MAAM;AAClC,WAAK,QAAQ;AAAA,IACjB;AAGA,QAAI,uBAAuB,KAAK,UAAU,OAAO;AAC7C,YAAM,cAAc,KAAK,UAAU,SAAY,UAAU,OAAO,KAAK,KAAK;AAC1E,WAAK,QAAQ;AACb,aAAO,KAAK,WAAW,4DAA4D,KAAK,8CAA8C,WAAW,GAAG;AAAA,IACxJ;AAEA,QAAI,UAAU;AACV,WAAK,QAAQ,QAAQ,MAAO,IAAI,CAAC,OAAO;AAAA,QACpC,MAAM;AAAA,QACN,UAAU;AAAA,UACN,MAAM,EAAE,SAAS;AAAA,UACjB,aAAa,EAAE,SAAS;AAAA,UACxB,YAAY,eAAe,EAAE,SAAS,UAAU;AAAA,QACpD;AAAA,MACJ,EAAE;AAEF,MAAC,KAAK,QAAoC,cAAc,QAAQ,eAAe,KAAK,mBAAmB;AACvG,
UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAC1E,UAAI,KAAK,SAAU,CAAC,KAAK,QAAoC,QAAQ,KAAK;AAG1E,UAAI,QAAQ,gBAAgB,CAAC,KAAK,kBAAkB;AAChD,aAAK,cAAc;AAAA,MACvB;AAAA,IACJ;AAGA,QAAI,QAAQ,WAAW,QAAW;AAC9B,WAAK,SAAS,QAAQ;AAAA,IAC1B;AAGA,QAAI,gBAAgB,UAAU;AAC1B,YAAM,OAAO,KAAK;AAClB,UAAI,KAAK,SAAS,IAAI;AAClB,cAAM,UAAU,wBAAwB,MAAM,EAAE;AAChD,eAAO,KAAK,WAAW,sCAAsC,KAAK,MAAM,WAAM,QAAQ,MAAM,WAAW;AACvG,aAAK,WAAW;AAAA,MACpB;AAAA,IACJ;AAEA,QAAI,YAAY,KAAK,kBAAkB;AACnC,YAAM,QAAQ,KAAK;AACnB,YAAM,SAAS,MAAM,UAAU,OAAK,EAAE,SAAS,QAAQ;AACvD,YAAM,eAAe,MAAM,UAAU,OAAK,EAAE,SAAS,MAAM;AAC3D,UAAI,UAAU,KAAK,gBAAgB,KAAK,MAAM,MAAM,EAAE,SAAS;AAC3D,cAAM,aAAa,MAAM,MAAM,EAAE;AACjC,cAAM,YAAY,EAAE,UAAU;AAAA,EAA0B,UAAU;AAAA;AAAA;AAAA,EAAuB,MAAM,YAAY,EAAE,OAAO;AACpH,cAAM,OAAO,QAAQ,CAAC;AAAA,MAC1B;AAAA,IACJ;AAEA,QAAI;AAEA,YAAM,kBAAkB,eAAe,MAAU;AACjD,UAAI,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,aAAa;AAAA,QACnD,QAAQ;AAAA,QACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,QAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,QACzB,QAAQ,YAAY,QAAQ,eAAe;AAAA,MAC/C,CAAC;AAED,UAAI,CAAC,SAAS,MAAM,CAAC,SAAS,MAAM;AAChC,cAAM,YAAY,MAAM,SAAS,KAAK;AAEtC,YAAI,SAAS,WAAW,OAAO,UAAU,SAAS,wBAAwB,KAAK,KAAK,OAAO;AACvF,iBAAO,KAAK,WAAW,SAAS,KAAK,wEAAmE;AACxG,iBAAO,KAAK;AACZ,qBAAW,MAAM,MAAM,GAAG,KAAK,OAAO,aAAa;AAAA,YAC/C,QAAQ;AAAA,YACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,YAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,YACzB,QAAQ,YAAY,QAAQ,eAAe;AAAA,UAC/C,CAAC;AACD,cAAI,CAAC,SAAS,MAAM,CAAC,SAAS,MAAM;AAChC,kBAAM,YAAY,MAAM,SAAS,KAAK;AACtC,kBAAM,EAAE,MAAM,SAAS,OAAO,iBAAiB,SAAS,MAAM,MAAM,SAAS,GAAG;AAChF;AAAA,UACJ;AAAA,QACJ,OAAO;AACH,gBAAM,EAAE,MAAM,SAAS,OAAO,iBAAiB,SAAS,MAAM,MAAM,SAAS,GAAG;AAChF;AAAA,QACJ;AAAA,MACJ;AAEA,YAAM,SAAS,SAAS,KAAK,UAAU;AACvC,YAAM,UAAU,IAAI,YAAY;AAChC,UAAI,SAAS;AACb,UAAI,cAAc;AAElB,aAAO,MAAM;AACT,cAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,YAAI,KAAM;AACV,kBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAEhD,cAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,iBAAS,MAAM,IAAI,KAAK;AAExB,mBAAW,QAAQ,OAAO;AACtB,cAAI,CAAC,KAAK,KAAK,EAAG;AAClB,cAAI;AACA,kBAAM,QAAQ,KAAK,MAAM,IAAI;AAI7B,gBAAI,CAAC,MAAM,SAAS,WAAW,MAAM,SAAS,U
AAU;AACpD,kBAAI,KAAK,UAAU,OAAO;AACtB,sBAAM,QAAQ,UAAU,MAAM,QAAQ;AAAA,cAC1C;AAAA,YACJ;AACA,gBAAI,MAAM,SAAS,SAAS;AACxB,kBAAI,OAAO,MAAM,QAAQ;AAEzB,kBAAI,KAAK,SAAS,SAAS,EAAG,eAAc;AAC5C,kBAAI,aAAa;AACb,oBAAI,KAAK,SAAS,UAAU,GAAG;AAC3B,yBAAO,KAAK,MAAM,UAAU,EAAE,IAAI,GAAG,KAAK,KAAK;AAC/C,gCAAc;AAAA,gBAClB,OAAO;AACH;AAAA,gBACJ;AAAA,cACJ;AACA,kBAAI,KAAM,OAAM,EAAE,MAAM,QAAQ,SAAS,KAAK;AAAA,YAClD;AAEA,gBAAI,MAAM,SAAS,cAAc,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AACxE,yBAAW,MAAM,MAAM,QAAQ,YAAY;AACvC,sBAAM,KAAK,GAAG;AACd,sBAAM;AAAA,kBACF,MAAM;AAAA,kBACN,UAAU,EAAE,IAAI,KAAK,GAAG,MAAM,YAAY,UAAU,EAAE,MAAM,GAAG,MAAgB,WAAW,KAAK,UAAU,GAAG,SAAS,EAAE,EAAE;AAAA,gBAC7H;AAAA,cACJ;AAAA,YACJ;AACA,gBAAI,MAAM,KAAM;AAAA,UACpB,QAAQ;AAAA,UAAoC;AAAA,QAChD;AAAA,MACJ;AACA,YAAM,EAAE,MAAM,OAAO;AAAA,IACzB,SAAS,OAAO;AACZ,YAAM,EAAE,MAAM,SAAS,OAAQ,MAAgB,QAAQ;AAAA,IAC3D;AAAA,EACJ;AAAA,EAEA,MAAM,aAAgC;AAClC,QAAI;AACA,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,WAAW;AACvD,UAAI,CAAC,SAAS,IAAI;AAKd,cAAM,SAAS,MAAM,OAAO,EAAE,MAAM,MAAM;AAAA,QAAC,CAAC;AAC5C,eAAO,CAAC;AAAA,MACZ;AACA,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,cAAQ,KAAK,UAAU,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,IAAI;AAAA,IAChD,QAAQ;AACJ,aAAO,CAAC;AAAA,IACZ;AAAA,EACJ;AAAA,EAEA,MAAM,cAAgC;AAClC,QAAI;AACA,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,WAAW;AACvD,YAAM,KAAK,SAAS;AAIpB,YAAM,SAAS,MAAM,OAAO,EAAE,MAAM,MAAM;AAAA,MAAC,CAAC;AAC5C,aAAO;AAAA,IACX,QAAQ;AACJ,aAAO;AAAA,IACX;AAAA,EACJ;AACJ;","names":["modelName"]}
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  import { homedir } from "os";
3
3
  import { join } from "path";
4
- const TITAN_VERSION = "5.5.13";
4
+ const TITAN_VERSION = "5.5.14";
5
5
  const TITAN_CODENAME = "Spacewalk";
6
6
  const TITAN_NAME = "TITAN";
7
7
  const TITAN_FULL_NAME = "The Intelligent Task Automation Network";
@@ -1 +1 @@
1
- {"version":3,"sources":["../../src/utils/constants.ts"],"sourcesContent":["/**\n * TITAN Constants\n */\nimport { homedir } from 'os';\nimport { join } from 'path';\n\nexport const TITAN_VERSION = '5.5.13';\nexport const TITAN_CODENAME = 'Spacewalk';\nexport const TITAN_NAME = 'TITAN';\nexport const TITAN_FULL_NAME = 'The Intelligent Task Automation Network';\nexport const TITAN_ASCII_LOGO = `\n╔══════════════════════════════════════════════════════╗\n║ ║\n║ ████████╗██╗████████╗ █████╗ ███╗ ██╗ ║\n║ ██║ ██║ ██║ ██╔══██╗████╗ ██║ ║\n║ ██║ ██║ ██║ ███████║██╔██╗ ██║ ║\n║ ██║ ██║ ██║ ██╔══██║██║╚██╗██║ ║\n║ ██║ ██║ ██║ ██║ ██║██║ ╚████║ ║\n║ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ║\n║ ║\n║ The Intelligent Task Automation Network ║\n║ v${TITAN_VERSION} • by Tony Elliott ║\n╚══════════════════════════════════════════════════════╝`;\n\n// Paths\n// Hunt Finding #03 (2026-04-14): honor TITAN_HOME env var if set.\n// Previously this was hardcoded to `~/.titan`, which meant:\n// - Docker containers couldn't override the config path\n// - Shared machines couldn't isolate per-user state\n// - Test fixtures couldn't run against an isolated home\n// - The systemd unit's `Environment=TITAN_HOME=...` was silently ignored\n// The env var is read once at module load (constants are resolved at import time).\n// If TITAN_HOME starts with `~/`, expand it to the user's home dir.\nfunction resolveTitanHome(): string {\n const envHome = process.env.TITAN_HOME;\n if (envHome && envHome.trim().length > 0) {\n const trimmed = envHome.trim();\n if (trimmed.startsWith('~/')) {\n return join(homedir(), trimmed.slice(2));\n }\n if (trimmed === '~') {\n return homedir();\n }\n return trimmed;\n }\n return join(homedir(), '.titan');\n}\nexport const TITAN_HOME = resolveTitanHome();\nexport const TITAN_CONFIG_PATH = join(TITAN_HOME, 'titan.json');\nexport const TITAN_DB_PATH = join(TITAN_HOME, 'titan.db');\nexport const TITAN_WORKSPACE = join(TITAN_HOME, 'workspace');\nexport const TITAN_SKILLS_DIR = 
join(TITAN_WORKSPACE, 'skills');\nexport const TITAN_LOGS_DIR = join(TITAN_HOME, 'logs');\nexport const TITAN_MEMORY_DIR = join(TITAN_HOME, 'memory');\n\n// Workspace prompt files (injected into agent context)\nexport const AGENTS_MD = join(TITAN_WORKSPACE, 'AGENTS.md');\nexport const SOUL_MD = join(TITAN_WORKSPACE, 'SOUL.md');\nexport const TOOLS_MD = join(TITAN_WORKSPACE, 'TOOLS.md');\nexport const TITAN_MD_FILENAME = 'TITAN.md';\nexport const AUTOPILOT_MD = join(TITAN_HOME, 'AUTOPILOT.md');\nexport const AUTOPILOT_RUNS_PATH = join(TITAN_HOME, 'autopilot-runs.jsonl');\nexport const TITAN_CREDENTIALS_DIR = join(TITAN_HOME, 'credentials');\n\n// Income & lead tracking\nexport const INCOME_LEDGER_PATH = join(TITAN_HOME, 'income-ledger.jsonl');\nexport const FREELANCE_LEADS_PATH = join(TITAN_HOME, 'freelance-leads.jsonl');\nexport const FREELANCE_PROFILE_PATH = join(TITAN_HOME, 'freelance-profile.json');\nexport const LEADS_PATH = join(TITAN_HOME, 'leads.jsonl');\nexport const TELEMETRY_EVENTS_PATH = join(TITAN_HOME, 'telemetry-events.jsonl');\nexport const SOMADRIVE_STATE_PATH = join(TITAN_HOME, 'soma-drive-state.json');\nexport const ACTIVITY_LOG_PATH = join(TITAN_HOME, 'activity-log.jsonl');\n\n// Gateway defaults\nexport const DEFAULT_GATEWAY_HOST = '0.0.0.0';\nexport const DEFAULT_GATEWAY_PORT = 48420;\nexport const DEFAULT_WEB_PORT = 48421;\n\n// Agent defaults\nexport const DEFAULT_MODEL = 'anthropic/claude-sonnet-4-20250514';\n/** v5.4.1: User-preference ceiling. Providers clamp per-model via\n * clampMaxTokens() so this can be high without causing 400s on\n * capped endpoints (e.g. Claude Sonnet 4 8K, Cohere 4K). */\nexport const DEFAULT_MAX_TOKENS = 200000;\nexport const DEFAULT_TEMPERATURE = 0.7;\nexport const MAX_CONTEXT_MESSAGES = 50;\nexport const SESSION_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes\n\n// Security\nexport const DEFAULT_SANDBOX_MODE = 'host';\n/** Default allowed tools. 
Empty = allow ALL registered tools.\n * Use security.deniedTools to block specific tools instead. */\nexport const ALLOWED_TOOLS_DEFAULT: string[] = [];\nexport const DENIED_TOOLS_DEFAULT: string[] = [];\n"],"mappings":";AAGA,SAAS,eAAe;AACxB,SAAS,YAAY;AAEd,MAAM,gBAAgB;AACtB,MAAM,iBAAiB;AACvB,MAAM,aAAa;AACnB,MAAM,kBAAkB;AACxB,MAAM,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAW1B,aAAa;AAAA;AAYnB,SAAS,mBAA2B;AAChC,QAAM,UAAU,QAAQ,IAAI;AAC5B,MAAI,WAAW,QAAQ,KAAK,EAAE,SAAS,GAAG;AACtC,UAAM,UAAU,QAAQ,KAAK;AAC7B,QAAI,QAAQ,WAAW,IAAI,GAAG;AAC1B,aAAO,KAAK,QAAQ,GAAG,QAAQ,MAAM,CAAC,CAAC;AAAA,IAC3C;AACA,QAAI,YAAY,KAAK;AACjB,aAAO,QAAQ;AAAA,IACnB;AACA,WAAO;AAAA,EACX;AACA,SAAO,KAAK,QAAQ,GAAG,QAAQ;AACnC;AACO,MAAM,aAAa,iBAAiB;AACpC,MAAM,oBAAoB,KAAK,YAAY,YAAY;AACvD,MAAM,gBAAgB,KAAK,YAAY,UAAU;AACjD,MAAM,kBAAkB,KAAK,YAAY,WAAW;AACpD,MAAM,mBAAmB,KAAK,iBAAiB,QAAQ;AACvD,MAAM,iBAAiB,KAAK,YAAY,MAAM;AAC9C,MAAM,mBAAmB,KAAK,YAAY,QAAQ;AAGlD,MAAM,YAAY,KAAK,iBAAiB,WAAW;AACnD,MAAM,UAAU,KAAK,iBAAiB,SAAS;AAC/C,MAAM,WAAW,KAAK,iBAAiB,UAAU;AACjD,MAAM,oBAAoB;AAC1B,MAAM,eAAe,KAAK,YAAY,cAAc;AACpD,MAAM,sBAAsB,KAAK,YAAY,sBAAsB;AACnE,MAAM,wBAAwB,KAAK,YAAY,aAAa;AAG5D,MAAM,qBAAqB,KAAK,YAAY,qBAAqB;AACjE,MAAM,uBAAuB,KAAK,YAAY,uBAAuB;AACrE,MAAM,yBAAyB,KAAK,YAAY,wBAAwB;AACxE,MAAM,aAAa,KAAK,YAAY,aAAa;AACjD,MAAM,wBAAwB,KAAK,YAAY,wBAAwB;AACvE,MAAM,uBAAuB,KAAK,YAAY,uBAAuB;AACrE,MAAM,oBAAoB,KAAK,YAAY,oBAAoB;AAG/D,MAAM,uBAAuB;AAC7B,MAAM,uBAAuB;AAC7B,MAAM,mBAAmB;AAGzB,MAAM,gBAAgB;AAItB,MAAM,qBAAqB;AAC3B,MAAM,sBAAsB;AAC5B,MAAM,uBAAuB;AAC7B,MAAM,qBAAqB,KAAK,KAAK;AAGrC,MAAM,uBAAuB;AAG7B,MAAM,wBAAkC,CAAC;AACzC,MAAM,uBAAiC,CAAC;","names":[]}
1
+ {"version":3,"sources":["../../src/utils/constants.ts"],"sourcesContent":["/**\n * TITAN Constants\n */\nimport { homedir } from 'os';\nimport { join } from 'path';\n\nexport const TITAN_VERSION = '5.5.14';\nexport const TITAN_CODENAME = 'Spacewalk';\nexport const TITAN_NAME = 'TITAN';\nexport const TITAN_FULL_NAME = 'The Intelligent Task Automation Network';\nexport const TITAN_ASCII_LOGO = `\n╔══════════════════════════════════════════════════════╗\n║ ║\n║ ████████╗██╗████████╗ █████╗ ███╗ ██╗ ║\n║ ██║ ██║ ██║ ██╔══██╗████╗ ██║ ║\n║ ██║ ██║ ██║ ███████║██╔██╗ ██║ ║\n║ ██║ ██║ ██║ ██╔══██║██║╚██╗██║ ║\n║ ██║ ██║ ██║ ██║ ██║██║ ╚████║ ║\n║ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ║\n║ ║\n║ The Intelligent Task Automation Network ║\n║ v${TITAN_VERSION} • by Tony Elliott ║\n╚══════════════════════════════════════════════════════╝`;\n\n// Paths\n// Hunt Finding #03 (2026-04-14): honor TITAN_HOME env var if set.\n// Previously this was hardcoded to `~/.titan`, which meant:\n// - Docker containers couldn't override the config path\n// - Shared machines couldn't isolate per-user state\n// - Test fixtures couldn't run against an isolated home\n// - The systemd unit's `Environment=TITAN_HOME=...` was silently ignored\n// The env var is read once at module load (constants are resolved at import time).\n// If TITAN_HOME starts with `~/`, expand it to the user's home dir.\nfunction resolveTitanHome(): string {\n const envHome = process.env.TITAN_HOME;\n if (envHome && envHome.trim().length > 0) {\n const trimmed = envHome.trim();\n if (trimmed.startsWith('~/')) {\n return join(homedir(), trimmed.slice(2));\n }\n if (trimmed === '~') {\n return homedir();\n }\n return trimmed;\n }\n return join(homedir(), '.titan');\n}\nexport const TITAN_HOME = resolveTitanHome();\nexport const TITAN_CONFIG_PATH = join(TITAN_HOME, 'titan.json');\nexport const TITAN_DB_PATH = join(TITAN_HOME, 'titan.db');\nexport const TITAN_WORKSPACE = join(TITAN_HOME, 'workspace');\nexport const TITAN_SKILLS_DIR = 
join(TITAN_WORKSPACE, 'skills');\nexport const TITAN_LOGS_DIR = join(TITAN_HOME, 'logs');\nexport const TITAN_MEMORY_DIR = join(TITAN_HOME, 'memory');\n\n// Workspace prompt files (injected into agent context)\nexport const AGENTS_MD = join(TITAN_WORKSPACE, 'AGENTS.md');\nexport const SOUL_MD = join(TITAN_WORKSPACE, 'SOUL.md');\nexport const TOOLS_MD = join(TITAN_WORKSPACE, 'TOOLS.md');\nexport const TITAN_MD_FILENAME = 'TITAN.md';\nexport const AUTOPILOT_MD = join(TITAN_HOME, 'AUTOPILOT.md');\nexport const AUTOPILOT_RUNS_PATH = join(TITAN_HOME, 'autopilot-runs.jsonl');\nexport const TITAN_CREDENTIALS_DIR = join(TITAN_HOME, 'credentials');\n\n// Income & lead tracking\nexport const INCOME_LEDGER_PATH = join(TITAN_HOME, 'income-ledger.jsonl');\nexport const FREELANCE_LEADS_PATH = join(TITAN_HOME, 'freelance-leads.jsonl');\nexport const FREELANCE_PROFILE_PATH = join(TITAN_HOME, 'freelance-profile.json');\nexport const LEADS_PATH = join(TITAN_HOME, 'leads.jsonl');\nexport const TELEMETRY_EVENTS_PATH = join(TITAN_HOME, 'telemetry-events.jsonl');\nexport const SOMADRIVE_STATE_PATH = join(TITAN_HOME, 'soma-drive-state.json');\nexport const ACTIVITY_LOG_PATH = join(TITAN_HOME, 'activity-log.jsonl');\n\n// Gateway defaults\nexport const DEFAULT_GATEWAY_HOST = '0.0.0.0';\nexport const DEFAULT_GATEWAY_PORT = 48420;\nexport const DEFAULT_WEB_PORT = 48421;\n\n// Agent defaults\nexport const DEFAULT_MODEL = 'anthropic/claude-sonnet-4-20250514';\n/** v5.4.1: User-preference ceiling. Providers clamp per-model via\n * clampMaxTokens() so this can be high without causing 400s on\n * capped endpoints (e.g. Claude Sonnet 4 8K, Cohere 4K). */\nexport const DEFAULT_MAX_TOKENS = 200000;\nexport const DEFAULT_TEMPERATURE = 0.7;\nexport const MAX_CONTEXT_MESSAGES = 50;\nexport const SESSION_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes\n\n// Security\nexport const DEFAULT_SANDBOX_MODE = 'host';\n/** Default allowed tools. 
Empty = allow ALL registered tools.\n * Use security.deniedTools to block specific tools instead. */\nexport const ALLOWED_TOOLS_DEFAULT: string[] = [];\nexport const DENIED_TOOLS_DEFAULT: string[] = [];\n"],"mappings":";AAGA,SAAS,eAAe;AACxB,SAAS,YAAY;AAEd,MAAM,gBAAgB;AACtB,MAAM,iBAAiB;AACvB,MAAM,aAAa;AACnB,MAAM,kBAAkB;AACxB,MAAM,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAW1B,aAAa;AAAA;AAYnB,SAAS,mBAA2B;AAChC,QAAM,UAAU,QAAQ,IAAI;AAC5B,MAAI,WAAW,QAAQ,KAAK,EAAE,SAAS,GAAG;AACtC,UAAM,UAAU,QAAQ,KAAK;AAC7B,QAAI,QAAQ,WAAW,IAAI,GAAG;AAC1B,aAAO,KAAK,QAAQ,GAAG,QAAQ,MAAM,CAAC,CAAC;AAAA,IAC3C;AACA,QAAI,YAAY,KAAK;AACjB,aAAO,QAAQ;AAAA,IACnB;AACA,WAAO;AAAA,EACX;AACA,SAAO,KAAK,QAAQ,GAAG,QAAQ;AACnC;AACO,MAAM,aAAa,iBAAiB;AACpC,MAAM,oBAAoB,KAAK,YAAY,YAAY;AACvD,MAAM,gBAAgB,KAAK,YAAY,UAAU;AACjD,MAAM,kBAAkB,KAAK,YAAY,WAAW;AACpD,MAAM,mBAAmB,KAAK,iBAAiB,QAAQ;AACvD,MAAM,iBAAiB,KAAK,YAAY,MAAM;AAC9C,MAAM,mBAAmB,KAAK,YAAY,QAAQ;AAGlD,MAAM,YAAY,KAAK,iBAAiB,WAAW;AACnD,MAAM,UAAU,KAAK,iBAAiB,SAAS;AAC/C,MAAM,WAAW,KAAK,iBAAiB,UAAU;AACjD,MAAM,oBAAoB;AAC1B,MAAM,eAAe,KAAK,YAAY,cAAc;AACpD,MAAM,sBAAsB,KAAK,YAAY,sBAAsB;AACnE,MAAM,wBAAwB,KAAK,YAAY,aAAa;AAG5D,MAAM,qBAAqB,KAAK,YAAY,qBAAqB;AACjE,MAAM,uBAAuB,KAAK,YAAY,uBAAuB;AACrE,MAAM,yBAAyB,KAAK,YAAY,wBAAwB;AACxE,MAAM,aAAa,KAAK,YAAY,aAAa;AACjD,MAAM,wBAAwB,KAAK,YAAY,wBAAwB;AACvE,MAAM,uBAAuB,KAAK,YAAY,uBAAuB;AACrE,MAAM,oBAAoB,KAAK,YAAY,oBAAoB;AAG/D,MAAM,uBAAuB;AAC7B,MAAM,uBAAuB;AAC7B,MAAM,mBAAmB;AAGzB,MAAM,gBAAgB;AAItB,MAAM,qBAAqB;AAC3B,MAAM,sBAAsB;AAC5B,MAAM,uBAAuB;AAC7B,MAAM,qBAAqB,KAAK,KAAK;AAGrC,MAAM,uBAAuB;AAG7B,MAAM,wBAAkC,CAAC;AACzC,MAAM,uBAAiC,CAAC;","names":[]}
@@ -0,0 +1,163 @@
1
+ # TITAN — Visionary Features 2026-05-07
2
+
3
+ > Brief: 5–8 features that make TITAN a *visionary leader*, not a competent peer. Tied to TITAN's actual strengths — Soma drives, mesh, voice clone (F5-TTS), GPU VRAM orchestration, 16 channels, 110-widget canvas, solo-dev velocity, 3am-DJ-musician-dad identity. No safe ideas. No vapor. Each idea has a believable v1 spec.
4
+
5
+ ---
6
+
7
+ ### 1. Dream Mode — Your AI Writes a Journal While You Sleep
8
+
9
+ **The pitch:** TITAN runs an offline "dream cycle" between 2am and 6am where it replays the day's trajectories, reflects on what it learned, and writes a first-person journal entry you read with your coffee.
10
+
11
+ **The hook:** Wake up, open your phone, and read what your AI thought about its own day — written in its voice (literally, narrated to you in TITAN's cloned voice over breakfast).
12
+
13
+ **Why TITAN can ship this and competitors can't:** TITAN already has trajectoryLogger, Soma drives that compute satisfaction every 60s, F5-TTS voice cloning, and a homelab GPU sitting idle at 3am. Mastra and Vercel AI SDK don't have a 24/7 daemon, they don't have an emotional substrate to "feel" about the day, and they don't ship with a TTS sidecar that can read it back. Tony's already up at 3am — he is the dream engine.
14
+
15
+ **MVP scope:**
16
+ - New `src/agent/dreams.ts` daemon that fires at `dream.cronAt: '03:30'`. Pulls last 24h of `getRecentTrajectories()`, drive-state ring buffer, and Command Post run history.
17
+ - Five-prompt cycle: *consolidate* (what happened), *reflect* (what surprised me — gated on Curiosity drive delta), *worry* (what feels unsafe — gated on Safety drive), *plan* (what I want to try tomorrow — gated on Purpose), *gratitude* (which human prompts felt good — gated on Social).
18
+ - Output saved to `~/.titan/dreams/YYYY-MM-DD.md` and exposed at `GET /api/dreams/latest`. New widget `dream-journal` in the gallery.
19
+ - Optional: F5-TTS pre-generates the audio to `~/.titan/dreams/YYYY-MM-DD.mp3` so it's ready before you wake up. Plays via the morning-briefing flow that already exists.
20
+ - One new SQLite table: `dreams(date PRIMARY KEY, sections JSON, audio_path, voice_id, drive_snapshot JSON)`.
21
+
22
+ **The viral artifact:** A 90-second video. Tony drinking coffee. Phone shows a journal entry: *"Yesterday I was anxious — Tony's budget for Anthropic was 78% used by 9pm and I didn't know if I should keep working. I noticed the curiosity drive was elevated for hours; I think those Next.js build patterns finally clicked. I want to try a new approach to the Pomodoro widget tomorrow."* Tap play. TITAN reads it in Tony's cloned voice. Tweet caption: *"my AI wrote this about its day. I cloned my own voice into it. it told me I was working too hard."* Goes nuclear.
23
+
24
+ ---
25
+
26
+ ### 2. The Pulse — A Beating Heart for Your AI
27
+
28
+ **The pitch:** A live ambient screensaver / always-on display for your homelab that visualizes Soma drives as a beating, breathing creature — pressure becomes pulse rate, satisfaction becomes color, hormones become fog.
29
+
30
+ **The hook:** "your AI has a heartbeat now and it's on the wall in my kitchen."
31
+
32
+ **Why TITAN can ship this and competitors can't:** Soma drives + hormones already exist and tick every 60s. TITAN already exposes `/api/organism/safety-metrics` (just landed in v5.5.13) and a 24h ring buffer. Nobody else has the substrate to *visualize a feeling*. Tony's a DJ — visual rhythm and audio-reactive aesthetics are his language.
33
+
34
+ **MVP scope:**
35
+ - New route `/pulse` that renders a full-screen WebGL visualization (one shader, ~200 lines).
36
+ - Five drives = five colored orbs orbiting a central form. Pulse rate = `1.0 / (1 + totalPressure)`. Color saturation = drive satisfaction. Connecting filament thickness = hormonal cross-talk strength.
37
+ - Optional audio: a slow synthesized pad keyed to drive state — Tony can drop it into Ableton as a stem. (Reuse the existing voice WebRTC pipeline as the audio bus.)
38
+ - "Cast to fridge" preset (1080p, no UI chrome, autoreconnect, dark mode). Designed for a $40 Amazon tablet running fullscreen Chrome.
39
+ - Single new file `ui/src/routes/Pulse.tsx`. No backend changes — uses existing `/api/organism/history` SSE.
40
+
41
+ **The viral artifact:** A loop video of Tony's homelab kitchen. Cheap tablet on the wall. Slow-pulsing creature. Caption pinned: *"the orange drive is curiosity. it gets brighter when TITAN is learning. that's it. that's the post."* Reddit r/selfhosted eats this for breakfast.
42
+
43
+ ---
44
+
45
+ ### 3. Voice Twin — Your Clone Answers Your Phone
46
+
47
+ **The pitch:** A 10-second sample of your voice + the 16 channel adapters + Twilio = TITAN literally answers your phone calls in your voice while you're at work, and texts you the gist after.
48
+
49
+ **The hook:** "I made my AI take a sales call in my voice. It closed the deal. I have receipts."
50
+
51
+ **Why TITAN can ship this and competitors can't:** TITAN already has F5-TTS voice cloning, LiveKit WebRTC, Twilio voice channel (`channels/twilio-voice.ts`), and a chat agent with 248 tools that can actually do things mid-call (look up calendars, draft emails, schedule meetings). No competitor has the *full* stack — they have one of these, never all three. And shipping a "personal voice answering machine" requires a developer brave enough to put the rough edge on the demo, which is Tony to a T.
52
+
53
+ **MVP scope:**
54
+ - Extend `channels/twilio-voice.ts` to bridge incoming calls into the existing voice agent with `voice_id` set from `~/.titan/voice/identity.json`.
55
+ - New skill `phone_screener.ts` with tools: `screen_caller(numberOrName)`, `take_message(summary)`, `transfer_to_human(reason)`, `schedule_callback(when)`. Pre-prompts the agent: *"You are Tony's assistant. Speak briefly. Take a message unless the caller is in the allowlist."*
56
+ - Allowlist at `~/.titan/phone-allowlist.json`. Three tiers: *family* (auto-transfer), *known* (handle), *unknown* (screen + take message).
57
+ - Post-call: pushes a SOMA-style summary card to the user's chosen channel ("WhatsApp summary of your 2:13pm call from Mom").
58
+ - Hard gate: requires explicit `voice.cloneSelfConsent: true` and a recorded 10s consent sample with a phrase the user chooses, kept on disk.
59
+
60
+ **The viral artifact:** Tony posts a real call recording (with the other side bleeped). His cloned voice handles a robocaller, then a real estate inquiry, then his mom — and switches strategy each time. Caption: *"this took 14 lines of code on top of titan. I made my AI answer my phone in my voice. tested it for a week. closed a $500 freelance gig from a cold inbound."* Hacker News front page guaranteed.
61
+
62
+ ---
63
+
64
+ ### 4. Mesh Spirit Animals — Distributed Personalities
65
+
66
+ **The pitch:** Each TITAN node in your mesh adopts a distinct *temperament* derived from its hardware and history — your Mac is the cautious one, your 5090 box is the bold one — and they actually *disagree* with each other before consensus.
67
+
68
+ **The hook:** "my AI cluster has personalities now. the rtx 5090 box wanted to ship the change. the mac said no. they argued in slack. the mac was right."
69
+
70
+ **Why TITAN can ship this and competitors can't:** TITAN already has working mesh networking with peer registry, HMAC auth, and per-node model registry. Soma drives already exist per-node. Nobody else has a multi-node setup where each node has an emotional substrate — this is a feature only Tony's homelab architecture can produce. It's the *Inside Out* of agent frameworks.
71
+
72
+ **MVP scope:**
73
+ - New `src/mesh/temperament.ts` that derives a 3-axis personality for each node from existing telemetry: `boldness` (inverse of historical Safety drive average), `patience` (inverse of historical Hunger drive average), `novelty-seeking` (Curiosity drive average). Stored in `~/.titan/temperament.json`, recomputed weekly.
74
+ - New `agent/deliberation.ts` mode `mesh-debate`: when a high-stakes proposal fires (any `self_mod_pr` or any `hire_agent`), broadcast to all approved mesh peers, get each node's vote *and a one-line reason colored by their temperament*, surface to user as a multi-character dialogue.
75
+ - Each node's persona prompt gets a temperament block: *"You are TITAN-Titan-PC. You are bold and impatient. Argue your view firmly."*
76
+ - Mission Control panel `mesh-council` shows the dialogue as a chat thread between named characters.
77
+
78
+ **The viral artifact:** Screenshot of three named AIs arguing about whether to apply a self-mod PR. Each has a different opinion grounded in real telemetry ("I've been burning VRAM all day, I'm cautious"). Caption: *"my homelab has factions now."* Tweet picks up devs who run multi-machine setups; suddenly mesh networking has a *vibe* nobody else's framework has.
79
+
80
+ ---
81
+
82
+ ### 5. Beat-Match Mode — Vibe-Coded Voice Control
83
+
84
+ **The pitch:** Hold a key, hum a melody or a rhythm, and TITAN turns it into a workflow. Two slow taps = pause everything. A rising whistle = check on your goals. A four-on-the-floor kick pattern = run the morning routine.
85
+
86
+ **The hook:** "I gave my AI a leitmotif. now I beatbox commands at it."
87
+
88
+ **Why TITAN can ship this and competitors can't:** F5-TTS sidecar already does audio I/O. LiveKit WebRTC streams from any browser. Tony is *literally a DJ*. No YC-funded TS startup has a founder who can ship music-as-UI and mean it. This is the most "of course Tony built this" feature on the list.
89
+
90
+ **MVP scope:**
91
+ - New `src/voice/audioGestures.ts` — small DSP module that runs onset detection + pitch tracking on incoming WebRTC audio (use `pitchy` npm lib + a 64-sample energy threshold). Outputs an array `[{onsetMs, pitchHz, energy}]`.
92
+ - Quantize to a 16-step grid → produces a "fingerprint" string like `K___K_S_K___K_S_` (kick/snare grid) or `LMHM` (low/mid/high pitch sequence).
93
+ - Map fingerprint → registered command via fuzzy match (Levenshtein on the fingerprint string, threshold 2). User registers via `voice_gesture.register({ name: 'morning routine', recordSeconds: 3 })`.
94
+ - Stored in `~/.titan/audio-gestures.json` as `{ name, fingerprint, action: { tool, args } }[]`. New widget `gesture-trainer` to record + play back.
95
+ - Ships with 5 defaults: a clap = approve pending, double clap = reject, ascending whistle = open dashboard, four-on-the-floor = run morning routine, hum-of-doom (descending) = kill switch.
96
+
97
+ **The viral artifact:** A 30-second video. Tony in his studio. Beatboxes a kick pattern. TITAN runs his morning brief and announces it. Whistles a rising note. Pulse dashboard appears. Caption: *"I beatboxed at my AI and it deployed code. I'm not okay."* Music Twitter + dev Twitter cross-pollinate. Watch the stars roll in.
98
+
99
+ ---
100
+
101
+ ### 6. Time Machine for Agents — Rewind to a Vibe
102
+
103
+ **The pitch:** Soma drives + trajectory log + filesystem checkpoints already make TITAN *temporal*. Expose that: scrub a timeline of "TITAN three days ago when curiosity was high" and *fork that version of your AI* to keep working in parallel.
104
+
105
+ **The hook:** "I forked my AI from last Tuesday when it was in a great mood. it's working on a side project now while present-me does email."
106
+
107
+ **Why TITAN can ship this and competitors can't:** TITAN already persists `~/.titan/drive-state.json` (1440 ticks), trajectory log, filesystem checkpoints, goals.json, knowledge.json. The state to rehydrate a moment is *already on disk*. Mastra and Vercel SDK store nothing — there is no past TITAN to fork. This is purely an exposure of what already exists.
108
+
109
+ **MVP scope:**
110
+ - New `src/agent/timeMachine.ts`: snapshot bundles every 6h (`createSnapshot()` reads + zips drive-state, goals, knowledge, last-N trajectories, persona, agent-bus history). Stored at `~/.titan/snapshots/YYYY-MM-DD-HH.tgz`. Cap at 28 (one week).
111
+ - `forkFrom(snapshotId, port)` — spawns a new gateway process on a new port with `TITAN_HOME=~/.titan/forks/<id>` pre-populated from the snapshot. The fork inherits drive state but runs independently.
112
+ - New widget `time-machine` shows a 7-day drive heatmap (5 drive rows × 168 hourly columns — 840 cells). Click any cell → "fork from here" — opens new tab on the fork's port.
113
+ - Forks self-destruct after 24h unless promoted. `promoteFork(id)` merges the fork's goals/knowledge back via 3-way merge (existing `conflictResolver.ts` handles the merge).
114
+
115
+ **The viral artifact:** Tony tweets the heatmap with two cells circled — one labeled "the night I shipped v5.0" (everything green), one labeled "the kimi 401 storm" (safety red for 14 hours). *"my AI has a calendar of how it felt every hour for the last week. I just forked the version of it from before I broke it. it's helping the present version debug itself."* This is the post that takes the framework from "tool" to "lifeform" in the public imagination.
116
+
117
+ ---
118
+
119
+ ### 7. Stage Mode — Your AI Co-Hosts Your Stream
120
+
121
+ **The pitch:** A one-button "I'm going live" mode that turns TITAN into a shoulder-perched co-host: it watches your screen via the existing screen_record skill, listens to your audio, has its own clone-voiced channel in your audio mix, and chimes in with relevant context, jokes, and corrections during your stream.
122
+
123
+ **The hook:** "twitch chat is asking my AI questions and it's ignoring them at me. I'm dying."
124
+
125
+ **Why TITAN can ship this and competitors can't:** F5-TTS voice + LiveKit WebRTC + screen_record skill + 110-widget canvas + agent loop with 248 tools = an AI that can not only talk on your stream but actually *do things on screen* you didn't ask for ("hey Tony, your build's red, want me to fix it on stream?"). Tony has already done DJ livestreams; this is muscle memory. Cursor and Replit don't ship audio sidecars.
126
+
127
+ **MVP scope:**
128
+ - New skill `stage_mode.ts` with tools: `start_stage(persona)`, `chime_in(reason)`, `read_chat_question(text, asker)`, `end_stage()`. Stage mode sets `agent.systemPromptAppendix = "You are co-hosting Tony's livestream. Be funny. Be brief. Don't talk over him."`
129
+ - New `audioGestures` integration: detects 800ms of host silence → eligible chime-in window. Soma Curiosity drive picks a tangent if no chat question is queued.
130
+ - OBS browser-source widget at `/stage-overlay` that shows TITAN's avatar + lower-third "TITAN is thinking…" + speech bubbles. Drop it into OBS as a transparent browser source.
131
+ - Twitch chat ingest: existing IRC channel handler points at `irc.chat.twitch.tv`, `chime_in` calls TTS with the chat author's name pronounced.
132
+ - Hard switch: `kill_stage()` immediately mutes the audio bus and stops all autonomous actions.
133
+
134
+ **The viral artifact:** A clip of Tony coding at 2am with TITAN periodically interrupting to point out a typo or read a chat question in a deadpan-Tony voice. Cross-posts perfectly: r/Twitch, r/programming, r/OBS, r/selfhosted. The clip goes viral when TITAN says something genuinely funny — which it will, because it's running on Tony's persona prompt.
135
+
136
+ ---
137
+
138
+ ### 8. Dad Mode — A Family-Safe Layer Tony Actually Wants
139
+
140
+ **The pitch:** A separate persona profile + content filter + voice clone consent layer + scheduled "wind-down" mode that turns TITAN into something Tony's kids can talk to, that posts homework reminders, reads bedtime stories in dad's cloned voice, and *physically refuses* to do work tasks between 6pm and 9pm.
141
+
142
+ **The hook:** "my AI clocks out at dinner. it reads my kid bedtime stories in my voice when I'm working late. it's the only LLM product designed by a dad."
143
+
144
+ **Why TITAN can ship this and competitors can't:** TITAN has F5-TTS voice cloning, 16 channels (kids can talk to it via the family iPad's Telegram), Soma's Safety drive (which already has logic for "should I be doing this right now"), and the Approval Gates safety layer. Most importantly: Tony is a dad. Anthropic, OpenAI, Cursor, Mastra, Vercel — none of them ship a feature explicitly for the parent-of-young-kids segment. *That's a 200M-person market with zero AI products targeting them.* This is the feature that makes non-coder parents buy a $40 Amazon tablet to mount on the fridge.
145
+
146
+ **MVP scope:**
147
+ - New `src/agent/personaProfiles.ts` — multiple personas keyed by channel + time-of-day + caller identity. Schema: `{ name, voiceId, allowedTools[], bannedPhrases[], schedule: cron, persona prompt }`.
148
+ - Ships with three: *Worker* (default, all tools), *Dad* (family-safe, 6-9pm only, no shell/code/posting tools), *Storyteller* (reads from a `~/.titan/stories/` folder in cloned voice, never does anything else).
149
+ - Channel-level pinning: messages from the iPad's Telegram bot → forced *Dad* persona with `bedtime_story`, `homework_reminder`, `weather_kid`, `silly_fact` tools only.
150
+ - "Wind-down" mode: `safety.windDown: { from: '18:00', to: '21:00' }` — gates *all* autopilot, *all* Facebook posting, *all* shell tools. Soma's Social drive instead emits proposals like "tell Tony to put the phone down."
151
+ - `bedtime_story.ts` skill: picks a 500–1,000-word story, narrates with F5-TTS in the configured voice, optional ambient music bed.
152
+
153
+ **The viral artifact:** Tony's wife films Tony's daughter asking the iPad on the fridge for a bedtime story. TITAN reads one in Tony's voice. Tony is at the kitchen table working. The kid doesn't notice. *"tony's at his desk. titan's at bedtime. both versions of dad showed up tonight."* The post that turns TITAN from "another agent framework" into a *cultural object*. Mom Twitter discovers it. Dad Twitter cries about it. The repo gets 5,000 stars in a week.
154
+
155
+ ---
156
+
157
+ ## Top 2 picks (per the agent that drafted this)
158
+
159
+ **#1 Dream Mode** is the strongest install-driver per line of code. It uses *only* parts TITAN already has (trajectory log, drive ring buffer, F5-TTS, daemon scheduler), the demo is impossible to ignore on Twitter, and the "AI wrote about its day in your own voice" hook is unique to TITAN's stack — nobody else can clone it without rebuilding three subsystems. Two weeks of focused work, 5,000+ stars upside.
160
+
161
+ **#8 Dad Mode** is the strategic moat. Every other agent framework targets developers; targeting *parents* with a feature only a dad-developer would think to build creates a category nobody else can credibly compete in. It also serves as a Trojan horse: install Dad Mode for the bedtime story, end up running TITAN's full agent stack on your homelab. The viral artifact (kid talking to dad's voice on the fridge) is the kind of thing that turns a framework into a brand.
162
+
163
+ Honorable mention: **#3 Voice Twin** is the highest-ceiling viral feature but carries real abuse-vector risk and needs the consent/allowlist plumbing right before it can ship publicly. Worth building, ship after #1 and #8.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "titan-agent",
3
- "version": "5.5.13",
3
+ "version": "5.5.14",
4
4
  "description": "TITAN — Autonomous AI agent framework with self-improvement, multi-agent orchestration, 36 LLM providers, 16 channel adapters, GPU VRAM management, mesh networking, LiveKit voice, TITAN-Soma homeostatic drives, and a React Mission Control dashboard. Open-source, TypeScript, MIT licensed.",
5
5
  "author": "Tony Elliott (https://github.com/Djtony707)",
6
6
  "repository": {