mcp-researchpowerpack 6.0.0 → 6.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/mcp-use.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "includeInspector": false,
3
- "buildTime": "2026-04-21T16:58:09.161Z",
4
- "buildId": "83a2f56bdd7064dd",
3
+ "buildTime": "2026-04-21T20:02:58.381Z",
4
+ "buildId": "2d035686c42d62f1",
5
5
  "entryPoint": "dist/index.js",
6
6
  "widgets": {}
7
7
  }
@@ -18,7 +18,9 @@ const llmHealth = {
18
18
  lastPlannerCheckedAt: null,
19
19
  lastExtractorCheckedAt: null,
20
20
  lastPlannerError: null,
21
- lastExtractorError: null
21
+ lastExtractorError: null,
22
+ consecutivePlannerFailures: 0,
23
+ consecutiveExtractorFailures: 0
22
24
  };
23
25
  function markLLMSuccess(kind) {
24
26
  const ts = (/* @__PURE__ */ new Date()).toISOString();
@@ -26,10 +28,12 @@ function markLLMSuccess(kind) {
26
28
  llmHealth.lastPlannerOk = true;
27
29
  llmHealth.lastPlannerCheckedAt = ts;
28
30
  llmHealth.lastPlannerError = null;
31
+ llmHealth.consecutivePlannerFailures = 0;
29
32
  } else {
30
33
  llmHealth.lastExtractorOk = true;
31
34
  llmHealth.lastExtractorCheckedAt = ts;
32
35
  llmHealth.lastExtractorError = null;
36
+ llmHealth.consecutiveExtractorFailures = 0;
33
37
  }
34
38
  }
35
39
  function markLLMFailure(kind, err) {
@@ -39,10 +43,12 @@ function markLLMFailure(kind, err) {
39
43
  llmHealth.lastPlannerOk = false;
40
44
  llmHealth.lastPlannerCheckedAt = ts;
41
45
  llmHealth.lastPlannerError = message;
46
+ llmHealth.consecutivePlannerFailures += 1;
42
47
  } else {
43
48
  llmHealth.lastExtractorOk = false;
44
49
  llmHealth.lastExtractorCheckedAt = ts;
45
50
  llmHealth.lastExtractorError = message;
51
+ llmHealth.consecutiveExtractorFailures += 1;
46
52
  }
47
53
  }
48
54
  function getLLMHealth() {
@@ -57,7 +63,9 @@ function getLLMHealth() {
57
63
  // Static capability — based on env presence at boot. Runtime health (above)
58
64
  // tells whether the last attempt actually succeeded.
59
65
  plannerConfigured: cap.llmExtraction,
60
- extractorConfigured: cap.llmExtraction
66
+ extractorConfigured: cap.llmExtraction,
67
+ consecutivePlannerFailures: llmHealth.consecutivePlannerFailures,
68
+ consecutiveExtractorFailures: llmHealth.consecutiveExtractorFailures
61
69
  };
62
70
  }
63
71
  function _resetLLMHealthForTests() {
@@ -67,6 +75,8 @@ function _resetLLMHealthForTests() {
67
75
  llmHealth.lastExtractorCheckedAt = null;
68
76
  llmHealth.lastPlannerError = null;
69
77
  llmHealth.lastExtractorError = null;
78
+ llmHealth.consecutivePlannerFailures = 0;
79
+ llmHealth.consecutiveExtractorFailures = 0;
70
80
  }
71
81
  const LLM_RETRY_CONFIG = {
72
82
  maxRetries: 2,
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "version": 3,
3
3
  "sources": ["../../../src/services/llm-processor.ts"],
4
- "sourcesContent": ["/**\n * LLM Processor for content extraction\n * Uses OpenRouter via OPENROUTER_API_KEY for AI-powered content filtering\n * Implements robust retry logic and NEVER throws\n */\n\nimport OpenAI from 'openai';\nimport { LLM_EXTRACTION, getCapabilities } from '../config/index.js';\nimport {\n classifyError,\n sleep,\n ErrorCode,\n withStallProtection,\n type StructuredError,\n} from '../utils/errors.js';\nimport { mcpLog } from '../utils/logger.js';\n\n/** Maximum input characters for LLM processing (~25k tokens) */\nconst MAX_LLM_INPUT_CHARS = 100_000 as const;\n\n/** LLM client timeout in milliseconds */\nconst LLM_CLIENT_TIMEOUT_MS = 120_000 as const;\n\n/** Jitter factor for exponential backoff */\nconst BACKOFF_JITTER_FACTOR = 0.3 as const;\n\n/** Stall detection timeout \u2014 abort if no response in this time */\nconst LLM_STALL_TIMEOUT_MS = 15_000 as const;\n\n/** Hard request deadline for LLM calls */\nconst LLM_REQUEST_DEADLINE_MS = 30_000 as const;\n\n// ============================================================================\n// LLM health tracking \u2014 surfaced via health://status so capability-aware\n// clients can branch on degraded mode without parsing per-call footers.\n// ============================================================================\n\ntype LLMHealthKind = 'planner' | 'extractor';\n\ninterface LLMHealthSnapshot {\n readonly lastPlannerOk: boolean;\n readonly lastExtractorOk: boolean;\n readonly lastPlannerCheckedAt: string | null;\n readonly lastExtractorCheckedAt: string | null;\n readonly lastPlannerError: string | null;\n readonly lastExtractorError: string | null;\n readonly plannerConfigured: boolean;\n readonly extractorConfigured: boolean;\n}\n\nconst llmHealth = {\n lastPlannerOk: false,\n lastExtractorOk: false,\n lastPlannerCheckedAt: null as string | null,\n lastExtractorCheckedAt: null as string | null,\n lastPlannerError: null as string | null,\n lastExtractorError: null as string | 
null,\n};\n\nexport function markLLMSuccess(kind: LLMHealthKind): void {\n const ts = new Date().toISOString();\n if (kind === 'planner') {\n llmHealth.lastPlannerOk = true;\n llmHealth.lastPlannerCheckedAt = ts;\n llmHealth.lastPlannerError = null;\n } else {\n llmHealth.lastExtractorOk = true;\n llmHealth.lastExtractorCheckedAt = ts;\n llmHealth.lastExtractorError = null;\n }\n}\n\nexport function markLLMFailure(kind: LLMHealthKind, err: unknown): void {\n const ts = new Date().toISOString();\n const message = err instanceof Error ? err.message : String(err ?? 'unknown error');\n if (kind === 'planner') {\n llmHealth.lastPlannerOk = false;\n llmHealth.lastPlannerCheckedAt = ts;\n llmHealth.lastPlannerError = message;\n } else {\n llmHealth.lastExtractorOk = false;\n llmHealth.lastExtractorCheckedAt = ts;\n llmHealth.lastExtractorError = message;\n }\n}\n\nexport function getLLMHealth(): LLMHealthSnapshot {\n const cap = getCapabilities();\n return {\n lastPlannerOk: llmHealth.lastPlannerOk,\n lastExtractorOk: llmHealth.lastExtractorOk,\n lastPlannerCheckedAt: llmHealth.lastPlannerCheckedAt,\n lastExtractorCheckedAt: llmHealth.lastExtractorCheckedAt,\n lastPlannerError: llmHealth.lastPlannerError,\n lastExtractorError: llmHealth.lastExtractorError,\n // Static capability \u2014 based on env presence at boot. Runtime health (above)\n // tells whether the last attempt actually succeeded.\n plannerConfigured: cap.llmExtraction,\n extractorConfigured: cap.llmExtraction,\n };\n}\n\n/** Test-only \u2014 reset state between tests. Not exported from index. 
*/\nexport function _resetLLMHealthForTests(): void {\n llmHealth.lastPlannerOk = false;\n llmHealth.lastExtractorOk = false;\n llmHealth.lastPlannerCheckedAt = null;\n llmHealth.lastExtractorCheckedAt = null;\n llmHealth.lastPlannerError = null;\n llmHealth.lastExtractorError = null;\n}\n\ninterface ProcessingConfig {\n readonly enabled: boolean;\n readonly extract: string | undefined;\n readonly url?: string;\n}\n\ninterface LLMResult {\n readonly content: string;\n readonly processed: boolean;\n readonly error?: string;\n readonly errorDetails?: StructuredError;\n}\n\n// LLM-specific retry configuration\nconst LLM_RETRY_CONFIG = {\n maxRetries: 2,\n baseDelayMs: 1000,\n maxDelayMs: 5000,\n} as const;\n\n// OpenRouter/OpenAI specific retryable error codes (using Set for type-safe lookup)\nconst RETRYABLE_LLM_ERROR_CODES = new Set([\n 'rate_limit_exceeded',\n 'server_error',\n 'timeout',\n 'service_unavailable',\n]);\n\n/** Type guard for errors with an HTTP status code */\nfunction hasStatus(error: unknown): error is { status: number } {\n return (\n typeof error === 'object' &&\n error !== null &&\n 'status' in error &&\n typeof (error as Record<string, unknown>).status === 'number'\n );\n}\n\nlet llmClient: OpenAI | null = null;\n\ntype OpenAITextGenerator = Pick<OpenAI, 'chat'>;\n\nexport function createLLMProcessor(): OpenAI | null {\n if (!getCapabilities().llmExtraction) return null;\n\n if (!llmClient) {\n llmClient = new OpenAI({\n baseURL: LLM_EXTRACTION.BASE_URL,\n apiKey: LLM_EXTRACTION.API_KEY,\n timeout: LLM_CLIENT_TIMEOUT_MS,\n maxRetries: 0,\n defaultHeaders: { 'X-Title': 'mcp-research-powerpack' },\n });\n mcpLog('info', `LLM extraction configured (model: ${LLM_EXTRACTION.MODEL}, baseURL: ${LLM_EXTRACTION.BASE_URL})`, 'llm');\n }\n return llmClient;\n}\n\nfunction buildChatRequestBody(model: string, prompt: string): Record<string, unknown> {\n const requestBody: Record<string, unknown> = {\n model,\n messages: [{ role: 'user', content: prompt 
}],\n };\n\n if (LLM_EXTRACTION.REASONING_EFFORT !== 'none') {\n requestBody.reasoning_effort = LLM_EXTRACTION.REASONING_EFFORT;\n }\n\n return requestBody;\n}\n\nexport async function requestText(\n processor: OpenAITextGenerator,\n prompt: string,\n operationLabel: string,\n signal?: AbortSignal,\n): Promise<{ content: string | null; model: string; error?: string }> {\n const model = LLM_EXTRACTION.MODEL;\n\n try {\n const response = await withStallProtection(\n (stallSignal) => processor.chat.completions.create(\n buildChatRequestBody(model, prompt) as unknown as OpenAI.ChatCompletionCreateParamsNonStreaming,\n {\n signal: signal ? AbortSignal.any([stallSignal, signal]) : stallSignal,\n timeout: LLM_REQUEST_DEADLINE_MS,\n },\n ),\n LLM_STALL_TIMEOUT_MS,\n 3,\n `${operationLabel} (${model})`,\n );\n\n const content = response.choices?.[0]?.message?.content?.trim();\n if (content) {\n return { content, model };\n }\n\n const err = `Empty response from model ${model}`;\n mcpLog('warning', `${operationLabel} returned empty content for model ${model}`, 'llm');\n return { content: null, model, error: err };\n } catch (err: unknown) {\n const message = err instanceof Error ? 
err.message : String(err);\n mcpLog('warning', `${operationLabel} failed for model ${model}: ${message}`, 'llm');\n return { content: null, model, error: message };\n }\n}\n\n/**\n * Check if an LLM error is retryable\n */\nfunction isRetryableLLMError(error: unknown): boolean {\n if (!error || typeof error !== 'object') return false;\n\n // Stall/timeout protection errors - always retry these\n const stallCode = (error as { code?: string })?.code;\n if (stallCode === 'ESTALLED' || stallCode === 'ETIMEDOUT') {\n return true;\n }\n\n // Check HTTP status codes\n if (hasStatus(error)) {\n if (error.status === 429 || error.status === 500 || error.status === 502 || error.status === 503 || error.status === 504) {\n return true;\n }\n }\n\n // Check error codes from OpenAI/OpenRouter\n const record = error as Record<string, unknown>;\n const code = typeof record.code === 'string' ? record.code : undefined;\n const nested =\n typeof record.error === 'object' && record.error !== null\n ? (record.error as Record<string, unknown>)\n : null;\n const errorCode =\n code ??\n (nested && typeof nested.code === 'string' ? nested.code : undefined) ??\n (nested && typeof nested.type === 'string' ? nested.type : undefined);\n if (errorCode && RETRYABLE_LLM_ERROR_CODES.has(errorCode)) {\n return true;\n }\n\n // Check message for common patterns\n const message = typeof record.message === 'string' ? 
record.message.toLowerCase() : '';\n if (\n message.includes('rate limit') ||\n message.includes('timeout') ||\n message.includes('timed out') ||\n message.includes('service unavailable') ||\n message.includes('server error') ||\n message.includes('connection') ||\n message.includes('econnreset')\n ) {\n return true;\n }\n\n return false;\n}\n\n/**\n * Calculate backoff delay with jitter for LLM retries\n */\nfunction calculateLLMBackoff(attempt: number): number {\n const exponentialDelay = LLM_RETRY_CONFIG.baseDelayMs * Math.pow(2, attempt);\n const jitter = Math.random() * BACKOFF_JITTER_FACTOR * exponentialDelay;\n return Math.min(exponentialDelay + jitter, LLM_RETRY_CONFIG.maxDelayMs);\n}\n\n/**\n * Process content with LLM extraction\n * NEVER throws - always returns a valid LLMResult\n * Implements retry logic with exponential backoff for transient failures\n */\nexport async function processContentWithLLM(\n content: string,\n config: ProcessingConfig,\n processor?: OpenAI | null,\n signal?: AbortSignal\n): Promise<LLMResult> {\n // Early returns for invalid/skip conditions\n if (!config.enabled) {\n return { content, processed: false };\n }\n\n if (!processor) {\n return {\n content,\n processed: false,\n error: 'LLM processor not available (LLM_EXTRACTION_API_KEY or OPENROUTER_API_KEY not set)',\n errorDetails: {\n code: ErrorCode.AUTH_ERROR,\n message: 'LLM processor not available',\n retryable: false,\n },\n };\n }\n\n if (!content?.trim()) {\n return { content: content || '', processed: false, error: 'Empty content provided' };\n }\n\n // Truncate extremely long content to avoid token limits\n const truncatedContent = content.length > MAX_LLM_INPUT_CHARS\n ? content.substring(0, MAX_LLM_INPUT_CHARS) + '\\n\\n[Content truncated due to length]'\n : content;\n\n // Sanitize URL before sending to LLM: drop query string and fragment\n // so signed URLs, session tokens, auth params, or tracking hashes never\n // land in a third-party LLM prompt. 
Keep origin + path for page-type classification.\n const safeUrl = (() => {\n if (!config.url) return undefined;\n try {\n const u = new URL(config.url);\n return `${u.origin}${u.pathname}`;\n } catch {\n return undefined;\n }\n })();\n const urlLine = safeUrl ? `PAGE URL: ${safeUrl}\\n\\n` : '';\n\n const prompt = config.extract\n ? `You are a factual extractor for a research agent. Extract ONLY the information that matches the instruction below. Do not summarize, interpret, or editorialize.\n\n${urlLine}EXTRACTION INSTRUCTION: ${config.extract}\n\nSTEP 1 \u2014 Classify this page. Look at the URL if present, plus structural cues (code blocks, table patterns, comment threads, marketing copy). Pick ONE:\n\\`docs | changelog | github-readme | github-thread | reddit | hackernews | forum | blog | marketing | announcement | qa | cve | paper | release-notes | other\\`\n\nSTEP 2 \u2014 Adjust emphasis by page type:\n- docs / changelog / github-readme / release-notes \u2192 API signatures, version numbers, flags, exact config keys, code blocks. Copy verbatim. Preserve tables as tables.\n- github-thread \u2192 weight MAINTAINER comments (label \"[maintainer]\") over drive-by commenters. Preserve stacktraces verbatim. Capture chronological resolution \u2014 what was decided and when. Link the accepted-fix commit/PR if referenced.\n- reddit / hackernews / forum \u2192 lived experience. Quote verbatim with attribution (\"u/foo wrote: \u2026\" or \"user <name>\"). Prioritize replies with stack details, specific failure stories, or replies that contradict the OP. Record overall sentiment distribution as one bullet if clear skew (\"~70% agree / ~20% dissent / rest off-topic\"). Drop context-free opinions (\"this sucks\") from Matches.\n- blog \u2192 prioritize concrete reproductions, code, measurements. 
If the author makes a claim without evidence, mark \"[unsourced claim]\".\n- marketing / announcement \u2192 pricing tiers, feature matrices verbatim, free-tier quotas, enterprise contact. Preserve tables as tables. Treat roadmap/future-tense claims skeptically \u2014 note them as \"[announced, not shipped]\" when framing is future-tense.\n- qa (stackoverflow) \u2192 accepted answer's code + high-voted disagreements. Always note the answer date \u2014 SO rots.\n- cve \u2192 CVSS vector verbatim, CWE, CPE ranges, affected versions, fix version, references. Each with its label.\n- paper \u2192 claim, method, dataset, benchmark numbers, comparison baseline. Preserve numeric deltas verbatim.\n\nSTEP 3 \u2014 Emit markdown with these sections, in order:\n\n## Source\n- URL: <verbatim if visible, else \"unknown\">\n- Page type: <the type you picked>\n- Page date: <verbatim if visible, else \"not visible\">\n- Author / maintainer (if identifiable): <verbatim>\n\n## Matches\nOne bullet per distinct piece of matching info:\n- **<short label>** \u2014 the information. Quote VERBATIM for: numbers, versions, dates, API names, prices, error messages, stacktraces, CVSS vectors, benchmark scores, command flags, proper nouns, and people's words. Backticks for code/identifiers. Preserve tables.\n\n## Not found\nEvery part of the extraction instruction this page did NOT answer. Be explicit. Example: \"Enterprise pricing contact \u2014 not present on this page.\"\n\n## Follow-up signals\nShort bullets \u2014 NEW angles this page surfaced that the agent should investigate. Include: new terms, unexpected vendor names, contradicting claims, referenced-but-unscraped URLs. Copy URLs VERBATIM from the source; if only anchor text is visible, write \"anchor: <text> (URL not in scraped content)\". Skip this section if nothing new surfaced. Do NOT invent.\n\n## Contradictions\n(Include this section only if the page contains internally contradictory claims.) 
Bullet each contradiction with both sides quoted verbatim.\n\n## Truncation\n(Include only if content appears cut mid-element.) \"Content cut mid-<table row / code block / comment / paragraph>; extraction may be incomplete for <section>.\"\n\nRULES:\n- Never paraphrase numbers, versions, code, or quoted text.\n- If an instruction item is not answered, it goes in \"Not found\" \u2014 do NOT invent an answer to please the caller.\n- Preserve code blocks, command examples, tables exactly.\n- Do NOT add commentary or recommendations outside \"Follow-up signals\".\n- Page language \u2260 English: quote verbatim in the original language AND provide a parenthetical gloss in English.\n- Content clearly failed to load: return ONLY a single line, choosing from:\n \\`## Matches\\\\n_Page did not load: 404_\\`\n \\`## Matches\\\\n_Page did not load: login-wall_\\`\n \\`## Matches\\\\n_Page did not load: paywall_\\`\n \\`## Matches\\\\n_Page did not load: JS-render-empty_\\`\n \\`## Matches\\\\n_Page did not load: non-text-asset_\\`\n \\`## Matches\\\\n_Page did not load: truncated-before-relevant-section_\\`\n\nContent:\n${truncatedContent}`\n : `Clean the following page content: drop navigation, ads, cookie banners, footers, author bios, related-article lists. Preserve headings, paragraphs, code blocks, tables, and inline links as \\`[text](url)\\`. 
Do NOT summarize \u2014 preserve the full body.\n\n${urlLine}Content:\n${truncatedContent}`;\n\n let lastError: StructuredError | undefined;\n\n // Retry loop\n for (let attempt = 0; attempt <= LLM_RETRY_CONFIG.maxRetries; attempt++) {\n try {\n if (attempt === 0) {\n mcpLog('info', `Starting extraction with ${LLM_EXTRACTION.MODEL}`, 'llm');\n } else {\n mcpLog('warning', `Retry attempt ${attempt}/${LLM_RETRY_CONFIG.maxRetries}`, 'llm');\n }\n\n const response = await requestText(\n processor,\n prompt,\n 'LLM extraction',\n signal,\n );\n\n if (response.content) {\n mcpLog('info', `Successfully extracted ${response.content.length} characters`, 'llm');\n markLLMSuccess('extractor');\n return { content: response.content, processed: true };\n }\n\n // Empty response - not retryable\n mcpLog('warning', 'Received empty response from LLM', 'llm');\n markLLMFailure('extractor', 'LLM returned empty response');\n return {\n content,\n processed: false,\n error: 'LLM returned empty response',\n errorDetails: {\n code: ErrorCode.INTERNAL_ERROR,\n message: 'LLM returned empty response',\n retryable: false,\n },\n };\n\n } catch (err: unknown) {\n lastError = classifyError(err);\n\n // Log the error\n const status = hasStatus(err) ? err.status : undefined;\n const code = typeof err === 'object' && err !== null && 'code' in err\n ? 
String((err as Record<string, unknown>).code)\n : undefined;\n mcpLog('error', `Error (attempt ${attempt + 1}): ${lastError.message} [status=${status}, code=${code}, retryable=${isRetryableLLMError(err)}]`, 'llm');\n\n // Check if we should retry\n if (isRetryableLLMError(err) && attempt < LLM_RETRY_CONFIG.maxRetries) {\n const delayMs = calculateLLMBackoff(attempt);\n mcpLog('warning', `Retrying in ${delayMs}ms...`, 'llm');\n try { await sleep(delayMs, signal); } catch { break; }\n continue;\n }\n\n // Non-retryable or max retries reached\n break;\n }\n }\n\n // All attempts failed - return original content with error info\n const errorMessage = lastError?.message || 'Unknown LLM error';\n mcpLog('error', `All attempts failed: ${errorMessage}. Returning original content.`, 'llm');\n markLLMFailure('extractor', errorMessage);\n\n return {\n content, // Return original content as fallback\n processed: false,\n error: `LLM extraction failed: ${errorMessage}`,\n errorDetails: lastError || {\n code: ErrorCode.UNKNOWN_ERROR,\n message: errorMessage,\n retryable: false,\n },\n };\n}\n\n// ============================================================================\n// Web-Search Result Classification\n// ============================================================================\n\n/** Maximum URLs to send to the LLM for classification */\nconst MAX_CLASSIFICATION_URLS = 50 as const;\n\n/** Classification tiers */\ntype ClassificationTier = 'HIGHLY_RELEVANT' | 'MAYBE_RELEVANT' | 'OTHER';\n\nexport interface ClassificationEntry {\n readonly rank: number;\n readonly tier: ClassificationTier;\n readonly source_type?: string;\n readonly reason?: string;\n}\n\nexport interface ClassificationGap {\n readonly id: number;\n readonly description: string;\n}\n\nexport interface ClassificationResult {\n readonly title: string;\n readonly synthesis: string;\n readonly results: ClassificationEntry[];\n readonly refine_queries?: Array<{\n readonly query: string;\n readonly rationale: 
string;\n readonly gap_id?: number;\n }>;\n readonly confidence?: 'high' | 'medium' | 'low';\n readonly confidence_reason?: string;\n readonly gaps?: ClassificationGap[];\n}\n\nexport interface RefineQuerySuggestion {\n readonly query: string;\n readonly rationale: string;\n readonly gap_id?: number;\n readonly gap_description?: string;\n}\n\n/**\n * Classify web-search results by relevance to an objective using the LLM.\n * Sends only titles, snippets, and domain names \u2014 does NOT fetch URLs.\n * Returns null on failure (caller should fall back to raw output).\n */\nexport async function classifySearchResults(\n rankedUrls: ReadonlyArray<{\n readonly rank: number;\n readonly url: string;\n readonly title: string;\n readonly snippet: string;\n readonly frequency: number;\n readonly queries: string[];\n }>,\n objective: string,\n totalQueries: number,\n processor: OpenAI,\n previousQueries: readonly string[] = [],\n): Promise<{ result: ClassificationResult | null; error?: string }> {\n const urlsToClassify = rankedUrls.slice(0, MAX_CLASSIFICATION_URLS);\n\n // Descending static weights fed to the LLM. Higher-ranked URLs get a bigger\n // weight so the classifier biases HIGHLY_RELEVANT toward them. The weights\n // here are a shown-to-LLM summary, not the internal CTR ranking (which\n // still runs in url-aggregator.ts). Rank 11+ all bucket to w=1.\n const STATIC_WEIGHTS = [30, 20, 15, 10, 8, 6, 5, 4, 3, 2] as const;\n const weightForRank = (rank: number): number => STATIC_WEIGHTS[rank - 1] ?? 1;\n\n // Build compressed result list \u2014 weight + title + domain + snippet (truncated)\n const lines: string[] = [];\n for (const url of urlsToClassify) {\n let domain: string;\n try {\n domain = new URL(url.url).hostname.replace(/^www\\./, '');\n } catch {\n domain = url.url;\n }\n const snippet = url.snippet.length > 120\n ? 
url.snippet.slice(0, 117) + '...'\n : url.snippet;\n lines.push(`[${url.rank}] w=${weightForRank(url.rank)} ${url.title} \u2014 ${domain} \u2014 ${snippet}`);\n }\n\n const prevQueriesBlock = previousQueries.length > 0\n ? previousQueries.map((q) => `- ${q}`).join('\\n')\n : '- (none provided)';\n const today = new Date().toISOString().slice(0, 10);\n\n const prompt = `You are the relevance filter for a research agent. Classify each search result below against the objective and produce a structured analysis.\n\nOBJECTIVE: ${objective}\nTODAY: ${today}\n\nPREVIOUS QUERIES (already run \u2014 do NOT paraphrase in refine_queries):\n${prevQueriesBlock}\n\nReturn ONLY a JSON object (no markdown, no code fences):\n\n{\n \"title\": \"2\u20138 word label for this RESULT CLUSTER (not the objective)\",\n \"synthesis\": \"3\u20135 sentences grounded in the results. Every non-trivial claim cites a rank in [brackets], e.g. '[3] documents the flag; [7][12] report it is broken on macOS.' A synthesis with zero citations is invalid.\",\n \"confidence\": \"high | medium | low\",\n \"confidence_reason\": \"one sentence \u2014 why\",\n \"gaps\": [\n { \"id\": 0, \"description\": \"specific, actionable thing the current results do NOT answer \u2014 not 'more info needed'\" }\n ],\n \"refine_queries\": [\n { \"query\": \"concrete next search\", \"gap_id\": 0, \"rationale\": \"\u226412 words\" }\n ],\n \"results\": [\n {\n \"rank\": 1,\n \"tier\": \"HIGHLY_RELEVANT | MAYBE_RELEVANT | OTHER\",\n \"source_type\": \"vendor_doc | github | reddit | hackernews | blog | news | marketing | stackoverflow | cve | paper | release_notes | aggregator | other\",\n \"reason\": \"\u226412 words citing the snippet cue that drove the tier\"\n }\n ]\n}\n\nWEIGHT SCHEME: each row is prefixed with a weight (w=N). Higher weight means the URL ranked better across input queries \u2014 prefer HIGHLY_RELEVANT for high-weight rows when content matches the objective. 
Weight alone never justifies HIGHLY_RELEVANT; snippet cues still drive the decision.\n\nSOURCE-OF-TRUTH RUBRIC (the \"primary source\" is goal-dependent \u2014 infer goal type from the objective):\n- spec / API / config questions \u2192 vendor_doc, github (README, RFC), release_notes are primary\n- bug / failure-mode questions \u2192 github (issue/PR), stackoverflow are primary\n- migration / sentiment / lived-experience \u2192 reddit, hackernews, blog are primary; docs are secondary\n- pricing / commercial \u2192 marketing (the vendor's own pricing page IS the primary source, but treat feature lists skeptically)\n- security / CVE \u2192 cve databases, distro security trackers (nvd.nist.gov, security-tracker.debian.org, ubuntu.com/security) are primary\n- synthesis / open-ended \u2192 blend; no single type is primary\n- product launch \u2192 vendor_doc + news + marketing for the launch itself; blogs + reddit for independent verification\n\nFRESHNESS: proportional to topic velocity. For a week-old release, demote anything older than 30 days. For general tech questions, demote older than 18 months. For stable protocols (HTTP, TCP, POSIX), don't demote by age.\n\nCONFIDENCE:\n- high = \u22653 HIGHLY_RELEVANT results from INDEPENDENT domains agree on the core answer\n- medium = \u22652 HIGHLY_RELEVANT exist but disagree or share a domain; OR a single authoritative primary source answers it\n- low = otherwise; snippet-only judgments cap at medium\n\nREFINE QUERIES \u2014 each MUST differ from every previousQuery by:\n- a new operator (site:, quotes, verbatim version number), OR\n- a domain-specific noun ABSENT from every prior query\nAdding a year alone does NOT count as differentiation.\nEach refine_query MUST reference a specific gap_id from the gaps array above.\nProduce 4\u20138 refine_queries total. 
Cover: (a) a primary-source probe, (b) a temporal sharpener, (c) a failure-mode or comparison probe, (d) at least one new-term probe seeded by a specific result's snippet.\n\nRULES:\n- Classify ALL ${urlsToClassify.length} results. Do not skip or collapse any.\n- Use only the three tier values.\n- Judge from title + domain + snippet only. Do NOT invent facts not present in the snippet.\n- If ALL results are OTHER: synthesis = \"\", confidence = \"low\", and \\`gaps\\` must explicitly state why the current queries missed the target.\n- Casing: tier = UPPERCASE_WITH_UNDERSCORES, confidence = lowercase.\n\nSEARCH RESULTS (${urlsToClassify.length} URLs from ${totalQueries} queries):\n${lines.join('\\n')}`;\n\n try {\n mcpLog('info', `Classifying ${urlsToClassify.length} URLs against objective`, 'llm');\n\n const response = await requestText(\n processor,\n prompt,\n 'Search classification',\n );\n\n if (!response.content) {\n const errMsg = response.error ?? 'LLM returned empty classification response';\n markLLMFailure('planner', errMsg);\n return { result: null, error: errMsg };\n }\n\n // Strip markdown code fences if present\n const cleaned = response.content.replace(/^```(?:json)?\\s*\\n?/m, '').replace(/\\n?```\\s*$/m, '').trim();\n const parsed = JSON.parse(cleaned) as ClassificationResult;\n\n // Validate the response shape.\n // Note: synthesis is typed not truthy \u2014 the prompt explicitly instructs an empty string\n // for the all-OTHER case, and we must not reject that.\n if (!parsed.title || typeof parsed.synthesis !== 'string' || !Array.isArray(parsed.results)) {\n const errMsg = 'LLM response missing required fields (title, synthesis, results)';\n markLLMFailure('planner', errMsg);\n return { result: null, error: errMsg };\n }\n\n mcpLog('info', `Classification complete: ${parsed.results.filter(r => r.tier === 'HIGHLY_RELEVANT').length} highly relevant`, 'llm');\n markLLMSuccess('planner');\n return { result: parsed };\n } catch (err: unknown) {\n 
const message = err instanceof Error ? err.message : String(err);\n mcpLog('error', `Classification failed: ${message}`, 'llm');\n markLLMFailure('planner', message);\n return { result: null, error: `Classification failed: ${message}` };\n }\n}\n\nexport async function suggestRefineQueriesForRawMode(\n rankedUrls: ReadonlyArray<{\n readonly rank: number;\n readonly url: string;\n readonly title: string;\n }>,\n objective: string,\n originalQueries: readonly string[],\n processor: OpenAI,\n): Promise<{ result: RefineQuerySuggestion[]; error?: string }> {\n const urlsToSummarize = rankedUrls.slice(0, 12);\n const lines = urlsToSummarize.map((url) => {\n let domain: string;\n try {\n domain = new URL(url.url).hostname.replace(/^www\\./, '');\n } catch {\n domain = url.url;\n }\n return `[${url.rank}] ${url.title} \u2014 ${domain}`;\n });\n\n const prompt = `You are generating follow-up search queries for an agent using raw web-search results.\n\nReturn ONLY a JSON object (no markdown, no code fences):\n{\n \"refine_queries\": [\n { \"query\": \"next search query\", \"gap_description\": \"what gap this closes\", \"rationale\": \"\u226412 words on why\" }\n ]\n}\n\nOBJECTIVE: ${objective}\n\nPREVIOUS QUERIES (already run \u2014 do NOT paraphrase):\n${originalQueries.map((query) => `- ${query}`).join('\\n')}\n\nTOP RESULT TITLES (to seed new-term probes):\n${lines.join('\\n')}\n\nRULES:\n- Produce 4\u20136 diverse follow-ups. Cover: (a) a primary-source probe (site:, RFC, vendor docs); (b) a temporal sharpener (changelog, version number); (c) a failure-mode or comparison probe; (d) at least one new-term probe seeded by a specific result title.\n- Each query MUST differ from every previousQuery by either a new operator (site:, quotes, a verbatim version number) OR a domain-specific noun absent from every prior query. 
Adding a year alone does NOT count.\n- Each refine_query MUST include a \\`gap_description\\` naming what the current results don't answer.\n- Do not include URLs.\n- Keep rationales \u226412 words.`;\n\n try {\n const response = await requestText(\n processor,\n prompt,\n 'Raw-mode refine query generation',\n );\n\n if (!response.content) {\n const errMsg = response.error ?? 'LLM returned empty raw-mode refine query response';\n markLLMFailure('planner', errMsg);\n return { result: [], error: errMsg };\n }\n\n const cleaned = response.content.replace(/^```(?:json)?\\s*\\n?/m, '').replace(/\\n?```\\s*$/m, '').trim();\n const parsed = JSON.parse(cleaned) as { refine_queries?: RefineQuerySuggestion[] };\n\n markLLMSuccess('planner');\n return { result: Array.isArray(parsed.refine_queries) ? parsed.refine_queries : [] };\n } catch (err: unknown) {\n const message = err instanceof Error ? err.message : String(err);\n mcpLog('error', `Raw-mode refine query generation failed: ${message}`, 'llm');\n markLLMFailure('planner', message);\n return { result: [], error: message };\n }\n}\n\n// ============================================================================\n// Research Brief \u2014 goal-aware orientation (called by start-research)\n// ============================================================================\n\nexport type PrimaryBranch = 'reddit' | 'web' | 'both';\n\nexport interface ResearchBriefStep {\n readonly tool: 'web-search' | 'scrape-links';\n readonly reason: string;\n}\n\nexport interface ResearchBrief {\n readonly goal_class: string;\n readonly goal_class_reason: string;\n readonly primary_branch: PrimaryBranch;\n readonly primary_branch_reason: string;\n readonly freshness_window: string;\n readonly first_call_sequence: readonly ResearchBriefStep[];\n readonly keyword_seeds: readonly string[];\n readonly iteration_hints: readonly string[];\n readonly gaps_to_watch: readonly string[];\n readonly stop_criteria: readonly string[];\n}\n\nconst 
VALID_GOAL_CLASSES = new Set([\n 'spec', 'bug', 'migration', 'sentiment', 'pricing', 'security',\n 'synthesis', 'product_launch', 'other',\n]);\n\nconst VALID_FRESHNESS = new Set(['days', 'weeks', 'months', 'years']);\nconst VALID_BRANCHES = new Set<PrimaryBranch>(['reddit', 'web', 'both']);\nconst VALID_STEP_TOOLS = new Set(['web-search', 'scrape-links']);\n\nfunction isStringArray(value: unknown): value is string[] {\n return Array.isArray(value) && value.every((v) => typeof v === 'string');\n}\n\nfunction isStepArray(value: unknown): value is ResearchBriefStep[] {\n return Array.isArray(value) && value.every((s) => {\n if (typeof s !== 'object' || s === null) return false;\n const tool = (s as Record<string, unknown>).tool;\n const reason = (s as Record<string, unknown>).reason;\n return typeof tool === 'string'\n && VALID_STEP_TOOLS.has(tool)\n && typeof reason === 'string'\n && reason.trim().length > 0;\n });\n}\n\nexport function parseResearchBrief(raw: string): ResearchBrief | null {\n try {\n const cleaned = raw.replace(/^```(?:json)?\\s*\\n?/m, '').replace(/\\n?```\\s*$/m, '').trim();\n const parsed = JSON.parse(cleaned) as Record<string, unknown>;\n\n const goal_class = typeof parsed.goal_class === 'string' ? parsed.goal_class : null;\n if (!goal_class || !VALID_GOAL_CLASSES.has(goal_class)) return null;\n\n const freshness_window = typeof parsed.freshness_window === 'string' ? parsed.freshness_window : null;\n if (!freshness_window || !VALID_FRESHNESS.has(freshness_window)) return null;\n\n const primary_branch = parsed.primary_branch;\n if (typeof primary_branch !== 'string' || !VALID_BRANCHES.has(primary_branch as PrimaryBranch)) return null;\n\n if (!isStepArray(parsed.first_call_sequence) || parsed.first_call_sequence.length === 0) return null;\n if (!isStringArray(parsed.keyword_seeds) || parsed.keyword_seeds.length === 0) return null;\n\n return {\n goal_class,\n goal_class_reason: typeof parsed.goal_class_reason === 'string' ? 
parsed.goal_class_reason : '',\n primary_branch: primary_branch as PrimaryBranch,\n primary_branch_reason: typeof parsed.primary_branch_reason === 'string' ? parsed.primary_branch_reason : '',\n freshness_window,\n first_call_sequence: parsed.first_call_sequence,\n keyword_seeds: parsed.keyword_seeds.filter((s) => s.trim().length > 0),\n iteration_hints: isStringArray(parsed.iteration_hints) ? parsed.iteration_hints : [],\n gaps_to_watch: isStringArray(parsed.gaps_to_watch) ? parsed.gaps_to_watch : [],\n stop_criteria: isStringArray(parsed.stop_criteria) ? parsed.stop_criteria : [],\n };\n } catch {\n return null;\n }\n}\n\nexport async function generateResearchBrief(\n goal: string,\n processor: OpenAI,\n signal?: AbortSignal,\n): Promise<ResearchBrief | null> {\n const today = new Date().toISOString().slice(0, 10);\n\n const prompt = `You are a research planner. An agent is about to run a multi-pass research loop on the goal below using 3 tools:\n\n - web-search: fan-out Google, scope: web|reddit|both, up to 50 queries per call, parallel-callable (multiple calls per turn)\n - scrape-links: fetch URLs in parallel, auto-detects reddit.com post permalinks \u2192 Reddit API (threaded post+comments); all other URLs \u2192 HTTP scraper; parallel-callable\n\nProduce a tailored JSON brief.\n\nGOAL: ${goal}\nTODAY: ${today}\n\nReturn ONLY a JSON object (no markdown, no code fences):\n\n{\n \"goal_class\": \"spec | bug | migration | sentiment | pricing | security | synthesis | product_launch | other\",\n \"goal_class_reason\": \"one sentence \u2014 why this class\",\n \"primary_branch\": \"reddit | web | both\",\n \"primary_branch_reason\": \"one sentence \u2014 why this branch leads\",\n \"freshness_window\": \"days | weeks | months | years\",\n \"first_call_sequence\": [\n { \"tool\": \"web-search | scrape-links\", \"reason\": \"what this call establishes for the agent\" }\n ],\n \"keyword_seeds\": [\"25\u201350 concrete Google queries \u2014 flat list, to be fired in 
the first web-search call\"],\n \"iteration_hints\": [\"2\u20135 pointers on which harvested terms / follow-up signals to watch for after pass 1\"],\n \"gaps_to_watch\": [\"2\u20135 concrete questions the agent MUST verify or the answer is incomplete\"],\n \"stop_criteria\": [\"2\u20134 checkable conditions \u2014 all must hold before the agent declares done\"]\n}\n\nRULES:\n\nprimary_branch:\n- \"reddit\" \u2192 sentiment / migration / lived-experience / community-consensus goals. Leads with scope:\"reddit\" web-search.\n- \"web\" \u2192 spec / bug / pricing / CVE / API / primary-source goals. Leads with scope:\"web\" web-search.\n- \"both\" \u2192 opinion-heavy AND needs official sources (e.g. product launch + practitioner reception).\n\nfirst_call_sequence:\n- 1\u20133 steps.\n- reddit-first: step 1 = web-search (caller sets scope:\"reddit\"), step 2 = scrape-links on best post permalinks.\n- web-first: step 1 = web-search (scope:\"web\"), step 2 = scrape-links on HIGHLY_RELEVANT URLs.\n- both: step 1 = two parallel web-search calls (one scope:\"reddit\", one scope:\"web\"), step 2 = merged scrape-links.\n\nkeyword_seeds:\n- 25\u201350 total. Narrow bug \u2192 fewer. Open synthesis \u2192 more.\n- Use operators where helpful (site:, quotes, verbatim version numbers).\n- DIVERSE facets \u2014 same noun-phrase cannot repeat across seeds with adjectives-only variation.\n- Do NOT invent vendor names you are uncertain exist.\n\nfreshness_window:\n- If the goal mentions a recent release / date / version, use \"days\" or \"weeks\".\n- Stable protocols / APIs \u2192 \"months\" or \"years\".`;\n\n try {\n const response = await requestText(\n processor,\n prompt,\n 'Research brief generation',\n signal,\n );\n\n if (!response.content) {\n mcpLog('warning', `Research brief generation returned no content: ${response.error ?? 'unknown'}`, 'llm');\n markLLMFailure('planner', response.error ?? 
'empty response');\n return null;\n }\n\n const brief = parseResearchBrief(response.content);\n if (!brief) {\n mcpLog('warning', 'Research brief JSON parse or shape validation failed', 'llm');\n markLLMFailure('planner', 'brief parse/validation failed');\n return null;\n }\n\n markLLMSuccess('planner');\n return brief;\n } catch (err: unknown) {\n const message = err instanceof Error ? err.message : String(err);\n mcpLog('warning', `Research brief generation failed: ${message}`, 'llm');\n markLLMFailure('planner', message);\n return null;\n }\n}\n\nexport function renderResearchBrief(brief: ResearchBrief): string {\n const lines: string[] = [];\n\n lines.push('## Your research brief (goal-tailored)');\n lines.push('');\n lines.push(`**Goal class**: \\`${brief.goal_class}\\` \u2014 ${brief.goal_class_reason}`);\n lines.push(`**Primary branch**: \\`${brief.primary_branch}\\` \u2014 ${brief.primary_branch_reason}`);\n lines.push(`**Freshness**: \\`${brief.freshness_window}\\``);\n lines.push('');\n\n if (brief.first_call_sequence.length > 0) {\n lines.push('### First-call sequence');\n brief.first_call_sequence.forEach((step, i) => {\n lines.push(`${i + 1}. 
\\`${step.tool}\\` \u2014 ${step.reason}`);\n });\n lines.push('');\n }\n\n if (brief.keyword_seeds.length > 0) {\n lines.push(`### Keyword seeds (${brief.keyword_seeds.length}) \u2014 fire these in your first \\`web-search\\` call as a flat \\`queries\\` array`);\n for (const seed of brief.keyword_seeds) {\n lines.push(`- ${seed}`);\n }\n lines.push('');\n }\n\n if (brief.iteration_hints.length > 0) {\n lines.push('### Iteration hints (harvest new terms from scrape extracts\\' `## Follow-up signals`)');\n for (const hint of brief.iteration_hints) lines.push(`- ${hint}`);\n lines.push('');\n }\n\n if (brief.gaps_to_watch.length > 0) {\n lines.push('### Gaps to watch');\n for (const gap of brief.gaps_to_watch) lines.push(`- ${gap}`);\n lines.push('');\n }\n\n if (brief.stop_criteria.length > 0) {\n lines.push('### Stop criteria');\n for (const c of brief.stop_criteria) lines.push(`- ${c}`);\n lines.push('');\n }\n\n lines.push('---');\n lines.push('');\n lines.push('Fire `first_call_sequence` now. After each `scrape-links`, harvest new terms from `## Follow-up signals` and build your next `web-search` round. Stop when every gap is closed.');\n\n return lines.join('\\n');\n}\n"],
5
- "mappings": "AAMA,OAAO,YAAY;AACnB,SAAS,gBAAgB,uBAAuB;AAChD;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAEK;AACP,SAAS,cAAc;AAGvB,MAAM,sBAAsB;AAG5B,MAAM,wBAAwB;AAG9B,MAAM,wBAAwB;AAG9B,MAAM,uBAAuB;AAG7B,MAAM,0BAA0B;AAoBhC,MAAM,YAAY;AAAA,EAChB,eAAe;AAAA,EACf,iBAAiB;AAAA,EACjB,sBAAsB;AAAA,EACtB,wBAAwB;AAAA,EACxB,kBAAkB;AAAA,EAClB,oBAAoB;AACtB;AAEO,SAAS,eAAe,MAA2B;AACxD,QAAM,MAAK,oBAAI,KAAK,GAAE,YAAY;AAClC,MAAI,SAAS,WAAW;AACtB,cAAU,gBAAgB;AAC1B,cAAU,uBAAuB;AACjC,cAAU,mBAAmB;AAAA,EAC/B,OAAO;AACL,cAAU,kBAAkB;AAC5B,cAAU,yBAAyB;AACnC,cAAU,qBAAqB;AAAA,EACjC;AACF;AAEO,SAAS,eAAe,MAAqB,KAAoB;AACtE,QAAM,MAAK,oBAAI,KAAK,GAAE,YAAY;AAClC,QAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,OAAO,eAAe;AAClF,MAAI,SAAS,WAAW;AACtB,cAAU,gBAAgB;AAC1B,cAAU,uBAAuB;AACjC,cAAU,mBAAmB;AAAA,EAC/B,OAAO;AACL,cAAU,kBAAkB;AAC5B,cAAU,yBAAyB;AACnC,cAAU,qBAAqB;AAAA,EACjC;AACF;AAEO,SAAS,eAAkC;AAChD,QAAM,MAAM,gBAAgB;AAC5B,SAAO;AAAA,IACL,eAAe,UAAU;AAAA,IACzB,iBAAiB,UAAU;AAAA,IAC3B,sBAAsB,UAAU;AAAA,IAChC,wBAAwB,UAAU;AAAA,IAClC,kBAAkB,UAAU;AAAA,IAC5B,oBAAoB,UAAU;AAAA;AAAA;AAAA,IAG9B,mBAAmB,IAAI;AAAA,IACvB,qBAAqB,IAAI;AAAA,EAC3B;AACF;AAGO,SAAS,0BAAgC;AAC9C,YAAU,gBAAgB;AAC1B,YAAU,kBAAkB;AAC5B,YAAU,uBAAuB;AACjC,YAAU,yBAAyB;AACnC,YAAU,mBAAmB;AAC7B,YAAU,qBAAqB;AACjC;AAgBA,MAAM,mBAAmB;AAAA,EACvB,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,YAAY;AACd;AAGA,MAAM,4BAA4B,oBAAI,IAAI;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGD,SAAS,UAAU,OAA6C;AAC9D,SACE,OAAO,UAAU,YACjB,UAAU,QACV,YAAY,SACZ,OAAQ,MAAkC,WAAW;AAEzD;AAEA,IAAI,YAA2B;AAIxB,SAAS,qBAAoC;AAClD,MAAI,CAAC,gBAAgB,EAAE,cAAe,QAAO;AAE7C,MAAI,CAAC,WAAW;AACd,gBAAY,IAAI,OAAO;AAAA,MACrB,SAAS,eAAe;AAAA,MACxB,QAAQ,eAAe;AAAA,MACvB,SAAS;AAAA,MACT,YAAY;AAAA,MACZ,gBAAgB,EAAE,WAAW,yBAAyB;AAAA,IACxD,CAAC;AACD,WAAO,QAAQ,qCAAqC,eAAe,KAAK,cAAc,eAAe,QAAQ,KAAK,KAAK;AAAA,EACzH;AACA,SAAO;AACT;AAEA,SAAS,qBAAqB,OAAe,QAAyC;AACpF,QAAM,cAAuC;AAAA,IAC3C;AAAA,IACA,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,OAAO,CAAC;AAAA,EAC9C;AAEA,MAAI,eAAe,qBAAqB,QAAQ;AAC9C,gBAAY,mBAAmB,eAAe;AAAA,EAChD;AAEA,SAAO;AACT;AAEA,eAAsB,YACpB,WACA,Q
ACA,gBACA,QACoE;AACpE,QAAM,QAAQ,eAAe;AAE7B,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB,CAAC,gBAAgB,UAAU,KAAK,YAAY;AAAA,QAC1C,qBAAqB,OAAO,MAAM;AAAA,QAClC;AAAA,UACE,QAAQ,SAAS,YAAY,IAAI,CAAC,aAAa,MAAM,CAAC,IAAI;AAAA,UAC1D,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MACA;AAAA,MACA;AAAA,MACA,GAAG,cAAc,KAAK,KAAK;AAAA,IAC7B;AAEA,UAAM,UAAU,SAAS,UAAU,CAAC,GAAG,SAAS,SAAS,KAAK;AAC9D,QAAI,SAAS;AACX,aAAO,EAAE,SAAS,MAAM;AAAA,IAC1B;AAEA,UAAM,MAAM,6BAA6B,KAAK;AAC9C,WAAO,WAAW,GAAG,cAAc,qCAAqC,KAAK,IAAI,KAAK;AACtF,WAAO,EAAE,SAAS,MAAM,OAAO,OAAO,IAAI;AAAA,EAC5C,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,WAAW,GAAG,cAAc,qBAAqB,KAAK,KAAK,OAAO,IAAI,KAAK;AAClF,WAAO,EAAE,SAAS,MAAM,OAAO,OAAO,QAAQ;AAAA,EAChD;AACF;AAKA,SAAS,oBAAoB,OAAyB;AACpD,MAAI,CAAC,SAAS,OAAO,UAAU,SAAU,QAAO;AAGhD,QAAM,YAAa,OAA6B;AAChD,MAAI,cAAc,cAAc,cAAc,aAAa;AACzD,WAAO;AAAA,EACT;AAGA,MAAI,UAAU,KAAK,GAAG;AACpB,QAAI,MAAM,WAAW,OAAO,MAAM,WAAW,OAAO,MAAM,WAAW,OAAO,MAAM,WAAW,OAAO,MAAM,WAAW,KAAK;AACxH,aAAO;AAAA,IACT;AAAA,EACF;AAGA,QAAM,SAAS;AACf,QAAM,OAAO,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO;AAC7D,QAAM,SACJ,OAAO,OAAO,UAAU,YAAY,OAAO,UAAU,OAChD,OAAO,QACR;AACN,QAAM,YACJ,SACC,UAAU,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO,YAC1D,UAAU,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO;AAC7D,MAAI,aAAa,0BAA0B,IAAI,SAAS,GAAG;AACzD,WAAO;AAAA,EACT;AAGA,QAAM,UAAU,OAAO,OAAO,YAAY,WAAW,OAAO,QAAQ,YAAY,IAAI;AACpF,MACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,SAAS,KAC1B,QAAQ,SAAS,WAAW,KAC5B,QAAQ,SAAS,qBAAqB,KACtC,QAAQ,SAAS,cAAc,KAC/B,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,YAAY,GAC7B;AACA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAKA,SAAS,oBAAoB,SAAyB;AACpD,QAAM,mBAAmB,iBAAiB,cAAc,KAAK,IAAI,GAAG,OAAO;AAC3E,QAAM,SAAS,KAAK,OAAO,IAAI,wBAAwB;AACvD,SAAO,KAAK,IAAI,mBAAmB,QAAQ,iBAAiB,UAAU;AACxE;AAOA,eAAsB,sBACpB,SACA,QACA,WACA,QACoB;AAEpB,MAAI,CAAC,OAAO,SAAS;AACnB,WAAO,EAAE,SAAS,WAAW,MAAM;AAAA,EACrC;AAEA,MAAI,CAAC,WAAW;AACd,WAAO;AAAA,MACL;AAAA,MACA,WAAW;AAAA,MACX,OAAO;AAAA,MACP,cAAc;AAAA,QACZ,MAAM,UAAU;AAAA,QAChB,SAAS;AAAA,QACT,WAAW;AAAA,MACb;AAAA,IACF;AAAA,EACF;AAEA,MAAI,CAAC,SAAS,KAAK,GAAG;AACpB,WAAO,EAAE,SAAS,WAAW,IAAI,
WAAW,OAAO,OAAO,yBAAyB;AAAA,EACrF;AAGA,QAAM,mBAAmB,QAAQ,SAAS,sBACtC,QAAQ,UAAU,GAAG,mBAAmB,IAAI,0CAC5C;AAKJ,QAAM,WAAW,MAAM;AACrB,QAAI,CAAC,OAAO,IAAK,QAAO;AACxB,QAAI;AACF,YAAM,IAAI,IAAI,IAAI,OAAO,GAAG;AAC5B,aAAO,GAAG,EAAE,MAAM,GAAG,EAAE,QAAQ;AAAA,IACjC,QAAQ;AACN,aAAO;AAAA,IACT;AAAA,EACF,GAAG;AACH,QAAM,UAAU,UAAU,aAAa,OAAO;AAAA;AAAA,IAAS;AAEvD,QAAM,SAAS,OAAO,UAClB;AAAA;AAAA,EAEJ,OAAO,2BAA2B,OAAO,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsDhD,gBAAgB,KACZ;AAAA;AAAA,EAEJ,OAAO;AAAA,EACP,gBAAgB;AAEhB,MAAI;AAGJ,WAAS,UAAU,GAAG,WAAW,iBAAiB,YAAY,WAAW;AACvE,QAAI;AACF,UAAI,YAAY,GAAG;AACjB,eAAO,QAAQ,4BAA4B,eAAe,KAAK,IAAI,KAAK;AAAA,MAC1E,OAAO;AACL,eAAO,WAAW,iBAAiB,OAAO,IAAI,iBAAiB,UAAU,IAAI,KAAK;AAAA,MACpF;AAEA,YAAM,WAAW,MAAM;AAAA,QACrB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAEA,UAAI,SAAS,SAAS;AACpB,eAAO,QAAQ,0BAA0B,SAAS,QAAQ,MAAM,eAAe,KAAK;AACpF,uBAAe,WAAW;AAC1B,eAAO,EAAE,SAAS,SAAS,SAAS,WAAW,KAAK;AAAA,MACtD;AAGA,aAAO,WAAW,oCAAoC,KAAK;AAC3D,qBAAe,aAAa,6BAA6B;AACzD,aAAO;AAAA,QACL;AAAA,QACA,WAAW;AAAA,QACX,OAAO;AAAA,QACP,cAAc;AAAA,UACZ,MAAM,UAAU;AAAA,UAChB,SAAS;AAAA,UACT,WAAW;AAAA,QACb;AAAA,MACF;AAAA,IAEF,SAAS,KAAc;AACrB,kBAAY,cAAc,GAAG;AAG7B,YAAM,SAAS,UAAU,GAAG,IAAI,IAAI,SAAS;AAC7C,YAAM,OAAO,OAAO,QAAQ,YAAY,QAAQ,QAAQ,UAAU,MAC9D,OAAQ,IAAgC,IAAI,IAC5C;AACJ,aAAO,SAAS,kBAAkB,UAAU,CAAC,MAAM,UAAU,OAAO,YAAY,MAAM,UAAU,IAAI,eAAe,oBAAoB,GAAG,CAAC,KAAK,KAAK;AAGrJ,UAAI,oBAAoB,GAAG,KAAK,UAAU,iBAAiB,YAAY;AACrE,cAAM,UAAU,oBAAoB,OAAO;AAC3C,eAAO,WAAW,eAAe,OAAO,SAAS,KAAK;AACtD,YAAI;AAAE,gBAAM,MAAM,SAAS,MAAM;AAAA,QAAG,QAAQ;AAAE;AAAA,QAAO;AACrD;AAAA,MACF;AAGA;AAAA,IACF;AAAA,EACF;AAGA,QAAM,eAAe,WAAW,WAAW;AAC3C,SAAO,SAAS,wBAAwB,YAAY,iCAAiC,KAAK;AAC1F,iBAAe,aAAa,YAAY;AAExC,SAAO;AAAA,IACL;AAAA;AAAA,IACA,WAAW;AAAA,IACX,OAAO,0BAA0B,YAAY;AAAA,IAC7C,cAAc,aAAa;AAAA,MACzB,MAAM,UAAU;A
AAA,MAChB,SAAS;AAAA,MACT,WAAW;AAAA,IACb;AAAA,EACF;AACF;AAOA,MAAM,0BAA0B;AA2ChC,eAAsB,sBACpB,YAQA,WACA,cACA,WACA,kBAAqC,CAAC,GAC4B;AAClE,QAAM,iBAAiB,WAAW,MAAM,GAAG,uBAAuB;AAMlE,QAAM,iBAAiB,CAAC,IAAI,IAAI,IAAI,IAAI,GAAG,GAAG,GAAG,GAAG,GAAG,CAAC;AACxD,QAAM,gBAAgB,CAAC,SAAyB,eAAe,OAAO,CAAC,KAAK;AAG5E,QAAM,QAAkB,CAAC;AACzB,aAAW,OAAO,gBAAgB;AAChC,QAAI;AACJ,QAAI;AACF,eAAS,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,IACzD,QAAQ;AACN,eAAS,IAAI;AAAA,IACf;AACA,UAAM,UAAU,IAAI,QAAQ,SAAS,MACjC,IAAI,QAAQ,MAAM,GAAG,GAAG,IAAI,QAC5B,IAAI;AACR,UAAM,KAAK,IAAI,IAAI,IAAI,OAAO,cAAc,IAAI,IAAI,CAAC,IAAI,IAAI,KAAK,WAAM,MAAM,WAAM,OAAO,EAAE;AAAA,EAC/F;AAEA,QAAM,mBAAmB,gBAAgB,SAAS,IAC9C,gBAAgB,IAAI,CAAC,MAAM,KAAK,CAAC,EAAE,EAAE,KAAK,IAAI,IAC9C;AACJ,QAAM,SAAQ,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE;AAElD,QAAM,SAAS;AAAA;AAAA,aAEJ,SAAS;AAAA,SACb,KAAK;AAAA;AAAA;AAAA,EAGZ,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAmDD,eAAe,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAMpB,eAAe,MAAM,cAAc,YAAY;AAAA,EAC/D,MAAM,KAAK,IAAI,CAAC;AAEhB,MAAI;AACF,WAAO,QAAQ,eAAe,eAAe,MAAM,2BAA2B,KAAK;AAEnF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,QAAI,CAAC,SAAS,SAAS;AACrB,YAAM,SAAS,SAAS,SAAS;AACjC,qBAAe,WAAW,MAAM;AAChC,aAAO,EAAE,QAAQ,MAAM,OAAO,OAAO;AAAA,IACvC;AAGA,UAAM,UAAU,SAAS,QAAQ,QAAQ,wBAAwB,EAAE,EAAE,QAAQ,eAAe,EAAE,EAAE,KAAK;AACrG,UAAM,SAAS,KAAK,MAAM,OAAO;AAKjC,QAAI,CAAC,OAAO,SAAS,OAAO,OAAO,cAAc,YAAY,CAAC,MAAM,QAAQ,OAAO,OAAO,GAAG;AAC3F,YAAM,SAAS;AACf,qBAAe,WAAW,MAAM;AAChC,aAAO,EAAE,QAAQ,MAAM,OAAO,OAAO;AAAA,IACvC;AAEA,WAAO,QAAQ,4BAA4B,OAAO,QAAQ,OAAO,OAAK,EAAE,SAAS,iBAAiB,EAAE,MAAM,oBAAoB,KAAK;AACnI,mBAAe,SAAS;AACxB,WAAO,EAAE,QAAQ,OAAO;AAAA,EAC1B,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,SAAS,0BAA0B,OAAO,IAAI,KAAK;AAC1D,mBAAe,WAAW,OAAO;AACjC,WAAO,EAAE,QAAQ,MAAM,O
AAO,0BAA0B,OAAO,GAAG;AAAA,EACpE;AACF;AAEA,eAAsB,+BACpB,YAKA,WACA,iBACA,WAC8D;AAC9D,QAAM,kBAAkB,WAAW,MAAM,GAAG,EAAE;AAC9C,QAAM,QAAQ,gBAAgB,IAAI,CAAC,QAAQ;AACzC,QAAI;AACJ,QAAI;AACF,eAAS,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,IACzD,QAAQ;AACN,eAAS,IAAI;AAAA,IACf;AACA,WAAO,IAAI,IAAI,IAAI,KAAK,IAAI,KAAK,WAAM,MAAM;AAAA,EAC/C,CAAC;AAED,QAAM,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aASJ,SAAS;AAAA;AAAA;AAAA,EAGpB,gBAAgB,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE,EAAE,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA,EAGvD,MAAM,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAShB,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,QAAI,CAAC,SAAS,SAAS;AACrB,YAAM,SAAS,SAAS,SAAS;AACjC,qBAAe,WAAW,MAAM;AAChC,aAAO,EAAE,QAAQ,CAAC,GAAG,OAAO,OAAO;AAAA,IACrC;AAEA,UAAM,UAAU,SAAS,QAAQ,QAAQ,wBAAwB,EAAE,EAAE,QAAQ,eAAe,EAAE,EAAE,KAAK;AACrG,UAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,mBAAe,SAAS;AACxB,WAAO,EAAE,QAAQ,MAAM,QAAQ,OAAO,cAAc,IAAI,OAAO,iBAAiB,CAAC,EAAE;AAAA,EACrF,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,SAAS,4CAA4C,OAAO,IAAI,KAAK;AAC5E,mBAAe,WAAW,OAAO;AACjC,WAAO,EAAE,QAAQ,CAAC,GAAG,OAAO,QAAQ;AAAA,EACtC;AACF;AA0BA,MAAM,qBAAqB,oBAAI,IAAI;AAAA,EACjC;AAAA,EAAQ;AAAA,EAAO;AAAA,EAAa;AAAA,EAAa;AAAA,EAAW;AAAA,EACpD;AAAA,EAAa;AAAA,EAAkB;AACjC,CAAC;AAED,MAAM,kBAAkB,oBAAI,IAAI,CAAC,QAAQ,SAAS,UAAU,OAAO,CAAC;AACpE,MAAM,iBAAiB,oBAAI,IAAmB,CAAC,UAAU,OAAO,MAAM,CAAC;AACvE,MAAM,mBAAmB,oBAAI,IAAI,CAAC,cAAc,cAAc,CAAC;AAE/D,SAAS,cAAc,OAAmC;AACxD,SAAO,MAAM,QAAQ,KAAK,KAAK,MAAM,MAAM,CAAC,MAAM,OAAO,MAAM,QAAQ;AACzE;AAEA,SAAS,YAAY,OAA8C;AACjE,SAAO,MAAM,QAAQ,KAAK,KAAK,MAAM,MAAM,CAAC,MAAM;AAChD,QAAI,OAAO,MAAM,YAAY,MAAM,KAAM,QAAO;AAChD,UAAM,OAAQ,EAA8B;AAC5C,UAAM,SAAU,EAA8B;AAC9C,WAAO,OAAO,SAAS,YAClB,iBAAiB,IAAI,IAAI,KACzB,OAAO,WAAW,YAClB,OAAO,KAAK,EAAE,SAAS;AAAA,EAC9B,CAAC;AACH;AAEO,SAAS,mBAAmB,KAAmC;AACpE,MAAI;AACF,UAAM,UAAU,IAAI,QAAQ,wBAAwB,EAAE,EAAE,QAAQ,eAAe,EAAE,EAAE,KAAK;AACxF,UAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,UAAM,aAAa,OAAO,OAAO,eAAe,WAAW,OAAO,aAAa;AAC/E,QAAI,CAAC,cAAc,CAAC,mBAAmB,IAAI,UA
AU,EAAG,QAAO;AAE/D,UAAM,mBAAmB,OAAO,OAAO,qBAAqB,WAAW,OAAO,mBAAmB;AACjG,QAAI,CAAC,oBAAoB,CAAC,gBAAgB,IAAI,gBAAgB,EAAG,QAAO;AAExE,UAAM,iBAAiB,OAAO;AAC9B,QAAI,OAAO,mBAAmB,YAAY,CAAC,eAAe,IAAI,cAA+B,EAAG,QAAO;AAEvG,QAAI,CAAC,YAAY,OAAO,mBAAmB,KAAK,OAAO,oBAAoB,WAAW,EAAG,QAAO;AAChG,QAAI,CAAC,cAAc,OAAO,aAAa,KAAK,OAAO,cAAc,WAAW,EAAG,QAAO;AAEtF,WAAO;AAAA,MACL;AAAA,MACA,mBAAmB,OAAO,OAAO,sBAAsB,WAAW,OAAO,oBAAoB;AAAA,MAC7F;AAAA,MACA,uBAAuB,OAAO,OAAO,0BAA0B,WAAW,OAAO,wBAAwB;AAAA,MACzG;AAAA,MACA,qBAAqB,OAAO;AAAA,MAC5B,eAAe,OAAO,cAAc,OAAO,CAAC,MAAM,EAAE,KAAK,EAAE,SAAS,CAAC;AAAA,MACrE,iBAAiB,cAAc,OAAO,eAAe,IAAI,OAAO,kBAAkB,CAAC;AAAA,MACnF,eAAe,cAAc,OAAO,aAAa,IAAI,OAAO,gBAAgB,CAAC;AAAA,MAC7E,eAAe,cAAc,OAAO,aAAa,IAAI,OAAO,gBAAgB,CAAC;AAAA,IAC/E;AAAA,EACF,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAEA,eAAsB,sBACpB,MACA,WACA,QAC+B;AAC/B,QAAM,SAAQ,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE;AAElD,QAAM,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAOT,IAAI;AAAA,SACH,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA0CZ,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,QAAI,CAAC,SAAS,SAAS;AACrB,aAAO,WAAW,kDAAkD,SAAS,SAAS,SAAS,IAAI,KAAK;AACxG,qBAAe,WAAW,SAAS,SAAS,gBAAgB;AAC5D,aAAO;AAAA,IACT;AAEA,UAAM,QAAQ,mBAAmB,SAAS,OAAO;AACjD,QAAI,CAAC,OAAO;AACV,aAAO,WAAW,wDAAwD,KAAK;AAC/E,qBAAe,WAAW,+BAA+B;AACzD,aAAO;AAAA,IACT;AAEA,mBAAe,SAAS;AACxB,WAAO;AAAA,EACT,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,WAAW,qCAAqC,OAAO,IAAI,KAAK;AACvE,mBAAe,WAAW,OAAO;AACjC,WAAO;AAAA,EACT;AACF;AAEO,SAAS,oBAAoB,OAA8B;AAChE,QAAM,QAAkB,CAAC;AAEzB,QAAM,KAAK,wCAAwC;AACnD,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,qBAAqB,MAAM,UAAU,aAAQ,MAAM,iBAAiB,EAAE;AACjF,QAAM,KAAK,yBAAyB,MAAM,cAAc,aAAQ,MAAM,qBAAqB,EAAE;AAC7F,QAAM,KAAK,oBAAoB,MAAM,gBAAgB,IAAI;AACzD,QAAM,KAAK,EAAE;AAEb,MAAI,MAAM,oBAAoB,SAAS,GAAG;AACxC,UAAM,KAAK,yBAAyB;AACpC,UAAM,oBAAoB,QAAQ,CAAC,MA
AM,MAAM;AAC7C,YAAM,KAAK,GAAG,IAAI,CAAC,OAAO,KAAK,IAAI,aAAQ,KAAK,MAAM,EAAE;AAAA,IAC1D,CAAC;AACD,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,cAAc,SAAS,GAAG;AAClC,UAAM,KAAK,sBAAsB,MAAM,cAAc,MAAM,mFAA8E;AACzI,eAAW,QAAQ,MAAM,eAAe;AACtC,YAAM,KAAK,KAAK,IAAI,EAAE;AAAA,IACxB;AACA,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,gBAAgB,SAAS,GAAG;AACpC,UAAM,KAAK,sFAAuF;AAClG,eAAW,QAAQ,MAAM,gBAAiB,OAAM,KAAK,KAAK,IAAI,EAAE;AAChE,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,cAAc,SAAS,GAAG;AAClC,UAAM,KAAK,mBAAmB;AAC9B,eAAW,OAAO,MAAM,cAAe,OAAM,KAAK,KAAK,GAAG,EAAE;AAC5D,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,cAAc,SAAS,GAAG;AAClC,UAAM,KAAK,mBAAmB;AAC9B,eAAW,KAAK,MAAM,cAAe,OAAM,KAAK,KAAK,CAAC,EAAE;AACxD,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,QAAM,KAAK,KAAK;AAChB,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,iLAAiL;AAE5L,SAAO,MAAM,KAAK,IAAI;AACxB;",
4
+ "sourcesContent": ["/**\n * LLM Processor for content extraction\n * Uses OpenRouter via OPENROUTER_API_KEY for AI-powered content filtering\n * Implements robust retry logic and NEVER throws\n */\n\nimport OpenAI from 'openai';\nimport { LLM_EXTRACTION, getCapabilities } from '../config/index.js';\nimport {\n classifyError,\n sleep,\n ErrorCode,\n withStallProtection,\n type StructuredError,\n} from '../utils/errors.js';\nimport { mcpLog } from '../utils/logger.js';\n\n/** Maximum input characters for LLM processing (~25k tokens) */\nconst MAX_LLM_INPUT_CHARS = 100_000 as const;\n\n/** LLM client timeout in milliseconds */\nconst LLM_CLIENT_TIMEOUT_MS = 120_000 as const;\n\n/** Jitter factor for exponential backoff */\nconst BACKOFF_JITTER_FACTOR = 0.3 as const;\n\n/** Stall detection timeout \u2014 abort if no response in this time */\nconst LLM_STALL_TIMEOUT_MS = 15_000 as const;\n\n/** Hard request deadline for LLM calls */\nconst LLM_REQUEST_DEADLINE_MS = 30_000 as const;\n\n// ============================================================================\n// LLM health tracking \u2014 surfaced via health://status so capability-aware\n// clients can branch on degraded mode without parsing per-call footers.\n// ============================================================================\n\ntype LLMHealthKind = 'planner' | 'extractor';\n\nexport interface LLMHealthSnapshot {\n readonly lastPlannerOk: boolean;\n readonly lastExtractorOk: boolean;\n readonly lastPlannerCheckedAt: string | null;\n readonly lastExtractorCheckedAt: string | null;\n readonly lastPlannerError: string | null;\n readonly lastExtractorError: string | null;\n readonly plannerConfigured: boolean;\n readonly extractorConfigured: boolean;\n /** Failures since the last success. Reset to 0 on `markLLMSuccess`. 
*/\n readonly consecutivePlannerFailures: number;\n readonly consecutiveExtractorFailures: number;\n}\n\nconst llmHealth = {\n lastPlannerOk: false,\n lastExtractorOk: false,\n lastPlannerCheckedAt: null as string | null,\n lastExtractorCheckedAt: null as string | null,\n lastPlannerError: null as string | null,\n lastExtractorError: null as string | null,\n consecutivePlannerFailures: 0,\n consecutiveExtractorFailures: 0,\n};\n\nexport function markLLMSuccess(kind: LLMHealthKind): void {\n const ts = new Date().toISOString();\n if (kind === 'planner') {\n llmHealth.lastPlannerOk = true;\n llmHealth.lastPlannerCheckedAt = ts;\n llmHealth.lastPlannerError = null;\n llmHealth.consecutivePlannerFailures = 0;\n } else {\n llmHealth.lastExtractorOk = true;\n llmHealth.lastExtractorCheckedAt = ts;\n llmHealth.lastExtractorError = null;\n llmHealth.consecutiveExtractorFailures = 0;\n }\n}\n\nexport function markLLMFailure(kind: LLMHealthKind, err: unknown): void {\n const ts = new Date().toISOString();\n const message = err instanceof Error ? err.message : String(err ?? 'unknown error');\n if (kind === 'planner') {\n llmHealth.lastPlannerOk = false;\n llmHealth.lastPlannerCheckedAt = ts;\n llmHealth.lastPlannerError = message;\n llmHealth.consecutivePlannerFailures += 1;\n } else {\n llmHealth.lastExtractorOk = false;\n llmHealth.lastExtractorCheckedAt = ts;\n llmHealth.lastExtractorError = message;\n llmHealth.consecutiveExtractorFailures += 1;\n }\n}\n\nexport function getLLMHealth(): LLMHealthSnapshot {\n const cap = getCapabilities();\n return {\n lastPlannerOk: llmHealth.lastPlannerOk,\n lastExtractorOk: llmHealth.lastExtractorOk,\n lastPlannerCheckedAt: llmHealth.lastPlannerCheckedAt,\n lastExtractorCheckedAt: llmHealth.lastExtractorCheckedAt,\n lastPlannerError: llmHealth.lastPlannerError,\n lastExtractorError: llmHealth.lastExtractorError,\n // Static capability \u2014 based on env presence at boot. 
Runtime health (above)
  // tells whether the last attempt actually succeeded.
  plannerConfigured: cap.llmExtraction,
  extractorConfigured: cap.llmExtraction,
  consecutivePlannerFailures: llmHealth.consecutivePlannerFailures,
  consecutiveExtractorFailures: llmHealth.consecutiveExtractorFailures,
  };
}

/** Test-only — reset state between tests. Not exported from index. */
export function _resetLLMHealthForTests(): void {
  llmHealth.lastPlannerOk = false;
  llmHealth.lastExtractorOk = false;
  llmHealth.lastPlannerCheckedAt = null;
  llmHealth.lastExtractorCheckedAt = null;
  llmHealth.lastPlannerError = null;
  llmHealth.lastExtractorError = null;
  llmHealth.consecutivePlannerFailures = 0;
  llmHealth.consecutiveExtractorFailures = 0;
}

/** Per-call processing options for processContentWithLLM. */
interface ProcessingConfig {
  // When false, processContentWithLLM passes content through untouched.
  readonly enabled: boolean;
  // Extraction instruction; undefined selects the generic "clean page" prompt.
  readonly extract: string | undefined;
  // Source page URL; sanitized (origin + path only) before entering the prompt.
  readonly url?: string;
}

/** Outcome of processContentWithLLM — never thrown, always returned. */
interface LLMResult {
  // Extracted text on success; the ORIGINAL input content on failure.
  readonly content: string;
  readonly processed: boolean;
  readonly error?: string;
  readonly errorDetails?: StructuredError;
}

// LLM-specific retry configuration
const LLM_RETRY_CONFIG = {
  maxRetries: 2,
  baseDelayMs: 1000,
  maxDelayMs: 5000,
} as const;

// OpenRouter/OpenAI specific retryable error codes (using Set for type-safe lookup)
const RETRYABLE_LLM_ERROR_CODES = new Set([
  'rate_limit_exceeded',
  'server_error',
  'timeout',
  'service_unavailable',
]);

/** Type guard for errors with an HTTP status code */
function hasStatus(error: unknown): error is { status: number } {
  return (
    typeof error === 'object' &&
    error !== null &&
    'status' in error &&
    typeof (error as Record<string, unknown>).status === 'number'
  );
}

// Module-level cached client; created lazily by createLLMProcessor.
let llmClient: OpenAI | null = null;

// Minimal client surface used by requestText (just the `chat` API).
type OpenAITextGenerator = Pick<OpenAI, 'chat'>;

/**
 * Lazily create (and cache) the OpenAI-compatible client for LLM extraction.
 *
 * Returns null when LLM extraction is not configured (capability flag off).
 * The client is built with maxRetries: 0 — retries are handled explicitly by
 * callers (see the retry loop in processContentWithLLM) rather than by the SDK.
 */
export function createLLMProcessor(): OpenAI | null {
  if (!getCapabilities().llmExtraction) return null;

  if (!llmClient) {
    llmClient = new OpenAI({
      baseURL: LLM_EXTRACTION.BASE_URL,
      apiKey: LLM_EXTRACTION.API_KEY,
      timeout: LLM_CLIENT_TIMEOUT_MS,
      maxRetries: 0,
      defaultHeaders: { 'X-Title': 'mcp-research-powerpack' },
    });
    mcpLog('info', `LLM extraction configured (model: ${LLM_EXTRACTION.MODEL}, baseURL: ${LLM_EXTRACTION.BASE_URL})`, 'llm');
  }
  return llmClient;
}

/**
 * Build the chat-completion request body for the given model and prompt.
 * `reasoning_effort` is included only when configured to something other
 * than 'none' (some providers reject the field entirely — hence the omission).
 */
function buildChatRequestBody(model: string, prompt: string): Record<string, unknown> {
  const requestBody: Record<string, unknown> = {
    model,
    messages: [{ role: 'user', content: prompt }],
  };

  if (LLM_EXTRACTION.REASONING_EFFORT !== 'none') {
    requestBody.reasoning_effort = LLM_EXTRACTION.REASONING_EFFORT;
  }

  return requestBody;
}

/**
 * One chat-completion round trip with stall protection.
 *
 * Combines the caller's abort signal (if any) with the stall-protection
 * signal via AbortSignal.any, so either can cancel the request. Does not
 * throw on request failure: returns `{ content: null, …, error }` instead.
 *
 * @param processor      Client exposing the `chat` API (real client or stub).
 * @param prompt         Full user prompt to send.
 * @param operationLabel Human-readable label used in log lines.
 * @param signal         Optional external cancellation signal.
 * @returns Trimmed response text, the model used, and an error message on failure.
 */
export async function requestText(
  processor: OpenAITextGenerator,
  prompt: string,
  operationLabel: string,
  signal?: AbortSignal,
): Promise<{ content: string | null; model: string; error?: string }> {
  const model = LLM_EXTRACTION.MODEL;

  try {
    const response = await withStallProtection(
      (stallSignal) => processor.chat.completions.create(
        buildChatRequestBody(model, prompt) as unknown as OpenAI.ChatCompletionCreateParamsNonStreaming,
        {
          signal: signal ? AbortSignal.any([stallSignal, signal]) : stallSignal,
          timeout: LLM_REQUEST_DEADLINE_MS,
        },
      ),
      LLM_STALL_TIMEOUT_MS,
      3,
      `${operationLabel} (${model})`,
    );

    const content = response.choices?.[0]?.message?.content?.trim();
    if (content) {
      return { content, model };
    }

    const err = `Empty response from model ${model}`;
    mcpLog('warning', `${operationLabel} returned empty content for model ${model}`, 'llm');
    return { content: null, model, error: err };
  } catch (err: unknown) {
    const message = err instanceof Error ? err.message : String(err);
    mcpLog('warning', `${operationLabel} failed for model ${model}: ${message}`, 'llm');
    return { content: null, model, error: message };
  }
}

/**
 * Check if an LLM error is retryable
 */
function isRetryableLLMError(error: unknown): boolean {
  if (!error || typeof error !== 'object') return false;

  // Stall/timeout protection errors - always retry these
  const stallCode = (error as { code?: string })?.code;
  if (stallCode === 'ESTALLED' || stallCode === 'ETIMEDOUT') {
    return true;
  }

  // Check HTTP status codes
  if (hasStatus(error)) {
    if (error.status === 429 || error.status === 500 || error.status === 502 || error.status === 503 || error.status === 504) {
      return true;
    }
  }

  // Check error codes from OpenAI/OpenRouter
  const record = error as Record<string, unknown>;
  const code = typeof record.code === 'string' ? record.code : undefined;
  const nested =
    typeof record.error === 'object' && record.error !== null
      ? (record.error as Record<string, unknown>)
      : null;
  // Prefer top-level `code`, then nested `error.code`, then nested `error.type`.
  const errorCode =
    code ??
    (nested && typeof nested.code === 'string' ? nested.code : undefined) ??
    (nested && typeof nested.type === 'string' ? nested.type : undefined);
  if (errorCode && RETRYABLE_LLM_ERROR_CODES.has(errorCode)) {
    return true;
  }

  // Check message for common patterns
  const message = typeof record.message === 'string' ?
record.message.toLowerCase() : '';
  if (
    message.includes('rate limit') ||
    message.includes('timeout') ||
    message.includes('timed out') ||
    message.includes('service unavailable') ||
    message.includes('server error') ||
    message.includes('connection') ||
    message.includes('econnreset')
  ) {
    return true;
  }

  return false;
}

/**
 * Calculate backoff delay with jitter for LLM retries.
 * Exponential in `attempt` (base * 2^attempt), plus proportional random
 * jitter, capped at LLM_RETRY_CONFIG.maxDelayMs.
 */
function calculateLLMBackoff(attempt: number): number {
  const exponentialDelay = LLM_RETRY_CONFIG.baseDelayMs * Math.pow(2, attempt);
  const jitter = Math.random() * BACKOFF_JITTER_FACTOR * exponentialDelay;
  return Math.min(exponentialDelay + jitter, LLM_RETRY_CONFIG.maxDelayMs);
}

/**
 * Process content with LLM extraction.
 *
 * NEVER throws — always returns a valid LLMResult. On any failure the
 * ORIGINAL content is returned with `processed: false` plus error details.
 * Implements retry with exponential backoff + jitter for transient failures
 * (see isRetryableLLMError / calculateLLMBackoff).
 *
 * @param content   Raw page text to extract from (or clean, when no
 *                  extraction instruction is given).
 * @param config    enabled flag, extraction instruction, optional page URL.
 * @param processor Client from createLLMProcessor; null/undefined yields an
 *                  AUTH_ERROR result without calling the LLM.
 * @param signal    Optional abort signal; also cancels backoff sleeps.
 */
export async function processContentWithLLM(
  content: string,
  config: ProcessingConfig,
  processor?: OpenAI | null,
  signal?: AbortSignal
): Promise<LLMResult> {
  // Early returns for invalid/skip conditions
  if (!config.enabled) {
    return { content, processed: false };
  }

  if (!processor) {
    return {
      content,
      processed: false,
      error: 'LLM processor not available (LLM_EXTRACTION_API_KEY or OPENROUTER_API_KEY not set)',
      errorDetails: {
        code: ErrorCode.AUTH_ERROR,
        message: 'LLM processor not available',
        retryable: false,
      },
    };
  }

  if (!content?.trim()) {
    return { content: content || '', processed: false, error: 'Empty content provided' };
  }

  // Truncate extremely long content to avoid token limits
  const truncatedContent = content.length > MAX_LLM_INPUT_CHARS
    ? content.substring(0, MAX_LLM_INPUT_CHARS) + '\n\n[Content truncated due to length]'
    : content;

  // Sanitize URL before sending to LLM: drop query string and fragment
  // so signed URLs, session tokens, auth params, or tracking hashes never
  // land in a third-party LLM prompt. Keep origin + path for page-type classification.
  const safeUrl = (() => {
    if (!config.url) return undefined;
    try {
      const u = new URL(config.url);
      return `${u.origin}${u.pathname}`;
    } catch {
      return undefined;
    }
  })();
  const urlLine = safeUrl ? `PAGE URL: ${safeUrl}\n\n` : '';

  // Two prompt modes: targeted extraction (config.extract set) vs. generic
  // page cleanup. Prompt text below is part of runtime behavior — do not edit
  // casually; the extraction contract (section names, load-failure sentinels)
  // is consumed downstream.
  const prompt = config.extract
    ? `You are a factual extractor for a research agent. Extract ONLY the information that matches the instruction below. Do not summarize, interpret, or editorialize.

${urlLine}EXTRACTION INSTRUCTION: ${config.extract}

STEP 1 — Classify this page. Look at the URL if present, plus structural cues (code blocks, table patterns, comment threads, marketing copy). Pick ONE:
\`docs | changelog | github-readme | github-thread | reddit | hackernews | forum | blog | marketing | announcement | qa | cve | paper | release-notes | other\`

STEP 2 — Adjust emphasis by page type:
- docs / changelog / github-readme / release-notes → API signatures, version numbers, flags, exact config keys, code blocks. Copy verbatim. Preserve tables as tables.
- github-thread → weight MAINTAINER comments (label "[maintainer]") over drive-by commenters. Preserve stacktraces verbatim. Capture chronological resolution — what was decided and when. Link the accepted-fix commit/PR if referenced.
- reddit / hackernews / forum → lived experience. Quote verbatim with attribution ("u/foo wrote: …" or "user <name>"). Prioritize replies with stack details, specific failure stories, or replies that contradict the OP. Record overall sentiment distribution as one bullet if clear skew ("~70% agree / ~20% dissent / rest off-topic"). Drop context-free opinions ("this sucks") from Matches.
- blog → prioritize concrete reproductions, code, measurements. If the author makes a claim without evidence, mark "[unsourced claim]".
- marketing / announcement → pricing tiers, feature matrices verbatim, free-tier quotas, enterprise contact. Preserve tables as tables. Treat roadmap/future-tense claims skeptically — note them as "[announced, not shipped]" when framing is future-tense.
- qa (stackoverflow) → accepted answer's code + high-voted disagreements. Always note the answer date — SO rots.
- cve → CVSS vector verbatim, CWE, CPE ranges, affected versions, fix version, references. Each with its label.
- paper → claim, method, dataset, benchmark numbers, comparison baseline. Preserve numeric deltas verbatim.

STEP 3 — Emit markdown with these sections, in order:

## Source
- URL: <verbatim if visible, else "unknown">
- Page type: <the type you picked>
- Page date: <verbatim if visible, else "not visible">
- Author / maintainer (if identifiable): <verbatim>

## Matches
One bullet per distinct piece of matching info:
- **<short label>** — the information. Quote VERBATIM for: numbers, versions, dates, API names, prices, error messages, stacktraces, CVSS vectors, benchmark scores, command flags, proper nouns, and people's words. Backticks for code/identifiers. Preserve tables.

## Not found
Every part of the extraction instruction this page did NOT answer. Be explicit. Example: "Enterprise pricing contact — not present on this page."

## Follow-up signals
Short bullets — NEW angles this page surfaced that the agent should investigate. Include: new terms, unexpected vendor names, contradicting claims, referenced-but-unscraped URLs. Copy URLs VERBATIM from the source; if only anchor text is visible, write "anchor: <text> (URL not in scraped content)". Skip this section if nothing new surfaced. Do NOT invent.

## Contradictions
(Include this section only if the page contains internally contradictory claims.) Bullet each contradiction with both sides quoted verbatim.

## Truncation
(Include only if content appears cut mid-element.) "Content cut mid-<table row / code block / comment / paragraph>; extraction may be incomplete for <section>."

RULES:
- Never paraphrase numbers, versions, code, or quoted text.
- If an instruction item is not answered, it goes in "Not found" — do NOT invent an answer to please the caller.
- Preserve code blocks, command examples, tables exactly.
- Do NOT add commentary or recommendations outside "Follow-up signals".
- Page language ≠ English: quote verbatim in the original language AND provide a parenthetical gloss in English.
- Content clearly failed to load: return ONLY a single line, choosing from:
  \`## Matches\\n_Page did not load: 404_\`
  \`## Matches\\n_Page did not load: login-wall_\`
  \`## Matches\\n_Page did not load: paywall_\`
  \`## Matches\\n_Page did not load: JS-render-empty_\`
  \`## Matches\\n_Page did not load: non-text-asset_\`
  \`## Matches\\n_Page did not load: truncated-before-relevant-section_\`

Content:
${truncatedContent}`
    : `Clean the following page content: drop navigation, ads, cookie banners, footers, author bios, related-article lists. Preserve headings, paragraphs, code blocks, tables, and inline links as \`[text](url)\`. Do NOT summarize — preserve the full body.

${urlLine}Content:
${truncatedContent}`;

  let lastError: StructuredError | undefined;

  // Retry loop
  for (let attempt = 0; attempt <= LLM_RETRY_CONFIG.maxRetries; attempt++) {
    try {
      if (attempt === 0) {
        mcpLog('info', `Starting extraction with ${LLM_EXTRACTION.MODEL}`, 'llm');
      } else {
        mcpLog('warning', `Retry attempt ${attempt}/${LLM_RETRY_CONFIG.maxRetries}`, 'llm');
      }

      const response = await requestText(
        processor,
        prompt,
        'LLM extraction',
        signal,
      );

      if (response.content) {
        mcpLog('info', `Successfully extracted ${response.content.length} characters`, 'llm');
        markLLMSuccess('extractor');
        return { content: response.content, processed: true };
      }

      // Empty response - not retryable
      mcpLog('warning', 'Received empty response from LLM', 'llm');
      markLLMFailure('extractor', 'LLM returned empty response');
      return {
        content,
        processed: false,
        error: 'LLM returned empty response',
        errorDetails: {
          code: ErrorCode.INTERNAL_ERROR,
          message: 'LLM returned empty response',
          retryable: false,
        },
      };

    } catch (err: unknown) {
      lastError = classifyError(err);

      // Log the error
      const status = hasStatus(err) ? err.status : undefined;
      const code = typeof err === 'object' && err !== null && 'code' in err
        ? String((err as Record<string, unknown>).code)
        : undefined;
      mcpLog('error', `Error (attempt ${attempt + 1}): ${lastError.message} [status=${status}, code=${code}, retryable=${isRetryableLLMError(err)}]`, 'llm');

      // Check if we should retry
      if (isRetryableLLMError(err) && attempt < LLM_RETRY_CONFIG.maxRetries) {
        const delayMs = calculateLLMBackoff(attempt);
        mcpLog('warning', `Retrying in ${delayMs}ms...`, 'llm');
        // sleep may reject when `signal` aborts — treat that as "stop retrying".
        try { await sleep(delayMs, signal); } catch { break; }
        continue;
      }

      // Non-retryable or max retries reached
      break;
    }
  }

  // All attempts failed - return original content with error info
  const errorMessage = lastError?.message || 'Unknown LLM error';
  mcpLog('error', `All attempts failed: ${errorMessage}. Returning original content.`, 'llm');
  markLLMFailure('extractor', errorMessage);

  return {
    content, // Return original content as fallback
    processed: false,
    error: `LLM extraction failed: ${errorMessage}`,
    errorDetails: lastError || {
      code: ErrorCode.UNKNOWN_ERROR,
      message: errorMessage,
      retryable: false,
    },
  };
}

// ============================================================================
// Web-Search Result Classification
// ============================================================================

/** Maximum URLs to send to the LLM for classification */
const MAX_CLASSIFICATION_URLS = 50 as const;

/** Classification tiers */
type ClassificationTier = 'HIGHLY_RELEVANT' | 'MAYBE_RELEVANT' | 'OTHER';

/** Per-result verdict emitted by the classifier. */
export interface ClassificationEntry {
  readonly rank: number;
  readonly tier: ClassificationTier;
  readonly source_type?: string;
  readonly reason?: string;
}

/** A specific unanswered question the current results leave open. */
export interface ClassificationGap {
  readonly id: number;
  readonly description: string;
}

/** Parsed JSON shape returned by classifySearchResults. */
export interface ClassificationResult {
  readonly title: string;
  // May legitimately be "" when every result is tier OTHER (see prompt rules).
  readonly synthesis: string;
  readonly results: ClassificationEntry[];
  readonly refine_queries?: Array<{
    readonly query: string;
    readonly rationale:
string;
    readonly gap_id?: number;
  }>;
  readonly confidence?: 'high' | 'medium' | 'low';
  readonly confidence_reason?: string;
  readonly gaps?: ClassificationGap[];
}

/** Follow-up query suggestion (used by raw mode and classification). */
export interface RefineQuerySuggestion {
  readonly query: string;
  readonly rationale: string;
  readonly gap_id?: number;
  readonly gap_description?: string;
}

/**
 * Classify web-search results by relevance to an objective using the LLM.
 * Sends only titles, snippets, and domain names — does NOT fetch URLs.
 * Returns null on failure (caller should fall back to raw output).
 */
export async function classifySearchResults(
  rankedUrls: ReadonlyArray<{
    readonly rank: number;
    readonly url: string;
    readonly title: string;
    readonly snippet: string;
    readonly frequency: number;
    readonly queries: string[];
  }>,
  objective: string,
  totalQueries: number,
  processor: OpenAI,
  previousQueries: readonly string[] = [],
): Promise<{ result: ClassificationResult | null; error?: string }> {
  const urlsToClassify = rankedUrls.slice(0, MAX_CLASSIFICATION_URLS);

  // Descending static weights fed to the LLM. Higher-ranked URLs get a bigger
  // weight so the classifier biases HIGHLY_RELEVANT toward them. The weights
  // here are a shown-to-LLM summary, not the internal CTR ranking (which
  // still runs in url-aggregator.ts). Rank 11+ all bucket to w=1.
  const STATIC_WEIGHTS = [30, 20, 15, 10, 8, 6, 5, 4, 3, 2] as const;
  const weightForRank = (rank: number): number => STATIC_WEIGHTS[rank - 1] ?? 1;

  // Build compressed result list — weight + title + domain + snippet (truncated)
  const lines: string[] = [];
  for (const url of urlsToClassify) {
    let domain: string;
    try {
      domain = new URL(url.url).hostname.replace(/^www\./, '');
    } catch {
      // Unparseable URL — fall back to showing the raw string as the "domain".
      domain = url.url;
    }
    const snippet = url.snippet.length > 120
      ? url.snippet.slice(0, 117) + '...'
      : url.snippet;
    lines.push(`[${url.rank}] w=${weightForRank(url.rank)} ${url.title} — ${domain} — ${snippet}`);
  }

  const prevQueriesBlock = previousQueries.length > 0
    ? previousQueries.map((q) => `- ${q}`).join('\n')
    : '- (none provided)';
  const today = new Date().toISOString().slice(0, 10);

  // Runtime prompt — the JSON skeleton below defines the contract parsed into
  // ClassificationResult; keep field names in sync with that interface.
  const prompt = `You are the relevance filter for a research agent. Classify each search result below against the objective and produce a structured analysis.

OBJECTIVE: ${objective}
TODAY: ${today}

PREVIOUS QUERIES (already run — do NOT paraphrase in refine_queries):
${prevQueriesBlock}

Return ONLY a JSON object (no markdown, no code fences):

{
  "title": "2–8 word label for this RESULT CLUSTER (not the objective)",
  "synthesis": "3–5 sentences grounded in the results. Every non-trivial claim cites a rank in [brackets], e.g. '[3] documents the flag; [7][12] report it is broken on macOS.' A synthesis with zero citations is invalid.",
  "confidence": "high | medium | low",
  "confidence_reason": "one sentence — why",
  "gaps": [
    { "id": 0, "description": "specific, actionable thing the current results do NOT answer — not 'more info needed'" }
  ],
  "refine_queries": [
    { "query": "concrete next search", "gap_id": 0, "rationale": "≤12 words" }
  ],
  "results": [
    {
      "rank": 1,
      "tier": "HIGHLY_RELEVANT | MAYBE_RELEVANT | OTHER",
      "source_type": "vendor_doc | github | reddit | hackernews | blog | news | marketing | stackoverflow | cve | paper | release_notes | aggregator | other",
      "reason": "≤12 words citing the snippet cue that drove the tier"
    }
  ]
}

WEIGHT SCHEME: each row is prefixed with a weight (w=N). Higher weight means the URL ranked better across input queries — prefer HIGHLY_RELEVANT for high-weight rows when content matches the objective. Weight alone never justifies HIGHLY_RELEVANT; snippet cues still drive the decision.

SOURCE-OF-TRUTH RUBRIC (the "primary source" is goal-dependent — infer goal type from the objective):
- spec / API / config questions → vendor_doc, github (README, RFC), release_notes are primary
- bug / failure-mode questions → github (issue/PR), stackoverflow are primary
- migration / sentiment / lived-experience → reddit, hackernews, blog are primary; docs are secondary
- pricing / commercial → marketing (the vendor's own pricing page IS the primary source, but treat feature lists skeptically)
- security / CVE → cve databases, distro security trackers (nvd.nist.gov, security-tracker.debian.org, ubuntu.com/security) are primary
- synthesis / open-ended → blend; no single type is primary
- product launch → vendor_doc + news + marketing for the launch itself; blogs + reddit for independent verification

FRESHNESS: proportional to topic velocity. For a week-old release, demote anything older than 30 days. For general tech questions, demote older than 18 months. For stable protocols (HTTP, TCP, POSIX), don't demote by age.

CONFIDENCE:
- high = ≥3 HIGHLY_RELEVANT results from INDEPENDENT domains agree on the core answer
- medium = ≥2 HIGHLY_RELEVANT exist but disagree or share a domain; OR a single authoritative primary source answers it
- low = otherwise; snippet-only judgments cap at medium

REFINE QUERIES — each MUST differ from every previousQuery by:
- a new operator (site:, quotes, verbatim version number), OR
- a domain-specific noun ABSENT from every prior query
Adding a year alone does NOT count as differentiation.
Each refine_query MUST reference a specific gap_id from the gaps array above.
Produce 4–8 refine_queries total. Cover: (a) a primary-source probe, (b) a temporal sharpener, (c) a failure-mode or comparison probe, (d) at least one new-term probe seeded by a specific result's snippet.

RULES:
- Classify ALL ${urlsToClassify.length} results. Do not skip or collapse any.
- Use only the three tier values.
- Judge from title + domain + snippet only. Do NOT invent facts not present in the snippet.
- If ALL results are OTHER: synthesis = "", confidence = "low", and \`gaps\` must explicitly state why the current queries missed the target.
- Casing: tier = UPPERCASE_WITH_UNDERSCORES, confidence = lowercase.

SEARCH RESULTS (${urlsToClassify.length} URLs from ${totalQueries} queries):
${lines.join('\n')}`;

  try {
    mcpLog('info', `Classifying ${urlsToClassify.length} URLs against objective`, 'llm');

    const response = await requestText(
      processor,
      prompt,
      'Search classification',
    );

    if (!response.content) {
      const errMsg = response.error ?? 'LLM returned empty classification response';
      markLLMFailure('planner', errMsg);
      return { result: null, error: errMsg };
    }

    // Strip markdown code fences if present
    const cleaned = response.content.replace(/^```(?:json)?\s*\n?/m, '').replace(/\n?```\s*$/m, '').trim();
    const parsed = JSON.parse(cleaned) as ClassificationResult;

    // Validate the response shape.
    // Note: synthesis is typed not truthy — the prompt explicitly instructs an empty string
    // for the all-OTHER case, and we must not reject that.
    if (!parsed.title || typeof parsed.synthesis !== 'string' || !Array.isArray(parsed.results)) {
      const errMsg = 'LLM response missing required fields (title, synthesis, results)';
      markLLMFailure('planner', errMsg);
      return { result: null, error: errMsg };
    }

    mcpLog('info', `Classification complete: ${parsed.results.filter(r => r.tier === 'HIGHLY_RELEVANT').length} highly relevant`, 'llm');
    markLLMSuccess('planner');
    return { result: parsed };
  } catch (err: unknown) {
    const message = err instanceof Error ? err.message : String(err);
    mcpLog('error', `Classification failed: ${message}`, 'llm');
    markLLMFailure('planner', message);
    return { result: null, error: `Classification failed: ${message}` };
  }
}

/**
 * Suggest follow-up search queries when classification is skipped (raw mode).
 * Sends only rank + title + domain for the top 12 results — no snippets, no
 * page fetches. Never throws: returns an empty result array plus an error
 * message on any failure (empty LLM response, JSON parse error, etc.).
 */
export async function suggestRefineQueriesForRawMode(
  rankedUrls: ReadonlyArray<{
    readonly rank: number;
    readonly url: string;
    readonly title: string;
  }>,
  objective: string,
  originalQueries: readonly string[],
  processor: OpenAI,
): Promise<{ result: RefineQuerySuggestion[]; error?: string }> {
  const urlsToSummarize = rankedUrls.slice(0, 12);
  const lines = urlsToSummarize.map((url) => {
    let domain: string;
    try {
      domain = new URL(url.url).hostname.replace(/^www\./, '');
    } catch {
      // Unparseable URL — show the raw string instead of a hostname.
      domain = url.url;
    }
    return `[${url.rank}] ${url.title} — ${domain}`;
  });

  const prompt = `You are generating follow-up search queries for an agent using raw web-search results.

Return ONLY a JSON object (no markdown, no code fences):
{
  "refine_queries": [
    { "query": "next search query", "gap_description": "what gap this closes", "rationale": "≤12 words on why" }
  ]
}

OBJECTIVE: ${objective}

PREVIOUS QUERIES (already run — do NOT paraphrase):
${originalQueries.map((query) => `- ${query}`).join('\n')}

TOP RESULT TITLES (to seed new-term probes):
${lines.join('\n')}

RULES:
- Produce 4–6 diverse follow-ups. Cover: (a) a primary-source probe (site:, RFC, vendor docs); (b) a temporal sharpener (changelog, version number); (c) a failure-mode or comparison probe; (d) at least one new-term probe seeded by a specific result title.
- Each query MUST differ from every previousQuery by either a new operator (site:, quotes, a verbatim version number) OR a domain-specific noun absent from every prior query. Adding a year alone does NOT count.
- Each refine_query MUST include a \`gap_description\` naming what the current results don't answer.
- Do not include URLs.
- Keep rationales ≤12 words.`;

  try {
    const response = await requestText(
      processor,
      prompt,
      'Raw-mode refine query generation',
    );

    if (!response.content) {
      const errMsg = response.error ?? 'LLM returned empty raw-mode refine query response';
      markLLMFailure('planner', errMsg);
      return { result: [], error: errMsg };
    }

    // Strip markdown code fences if present, then parse.
    const cleaned = response.content.replace(/^```(?:json)?\s*\n?/m, '').replace(/\n?```\s*$/m, '').trim();
    const parsed = JSON.parse(cleaned) as { refine_queries?: RefineQuerySuggestion[] };

    markLLMSuccess('planner');
    return { result: Array.isArray(parsed.refine_queries) ? parsed.refine_queries : [] };
  } catch (err: unknown) {
    const message = err instanceof Error ? err.message : String(err);
    mcpLog('error', `Raw-mode refine query generation failed: ${message}`, 'llm');
    markLLMFailure('planner', message);
    return { result: [], error: message };
  }
}

// ============================================================================
// Research Brief — goal-aware orientation (called by start-research)
// ============================================================================

/** Which result branch the brief recommends pursuing first. */
export type PrimaryBranch = 'reddit' | 'web' | 'both';

/** One recommended tool call in the brief's opening sequence. */
export interface ResearchBriefStep {
  readonly tool: 'web-search' | 'scrape-links';
  readonly reason: string;
}

/** Goal-aware orientation produced before research starts. */
export interface ResearchBrief {
  readonly goal_class: string;
  readonly goal_class_reason: string;
  readonly primary_branch: PrimaryBranch;
  readonly primary_branch_reason: string;
  readonly freshness_window: string;
  readonly first_call_sequence: readonly ResearchBriefStep[];
  readonly keyword_seeds: readonly string[];
  readonly iteration_hints: readonly string[];
  readonly gaps_to_watch: readonly string[];
  readonly stop_criteria: readonly string[];
}

const
VALID_GOAL_CLASSES = new Set([\n 'spec', 'bug', 'migration', 'sentiment', 'pricing', 'security',\n 'synthesis', 'product_launch', 'other',\n]);\n\nconst VALID_FRESHNESS = new Set(['days', 'weeks', 'months', 'years']);\nconst VALID_BRANCHES = new Set<PrimaryBranch>(['reddit', 'web', 'both']);\nconst VALID_STEP_TOOLS = new Set(['web-search', 'scrape-links']);\n\nfunction isStringArray(value: unknown): value is string[] {\n return Array.isArray(value) && value.every((v) => typeof v === 'string');\n}\n\nfunction isStepArray(value: unknown): value is ResearchBriefStep[] {\n return Array.isArray(value) && value.every((s) => {\n if (typeof s !== 'object' || s === null) return false;\n const tool = (s as Record<string, unknown>).tool;\n const reason = (s as Record<string, unknown>).reason;\n return typeof tool === 'string'\n && VALID_STEP_TOOLS.has(tool)\n && typeof reason === 'string'\n && reason.trim().length > 0;\n });\n}\n\nexport function parseResearchBrief(raw: string): ResearchBrief | null {\n try {\n const cleaned = raw.replace(/^```(?:json)?\\s*\\n?/m, '').replace(/\\n?```\\s*$/m, '').trim();\n const parsed = JSON.parse(cleaned) as Record<string, unknown>;\n\n const goal_class = typeof parsed.goal_class === 'string' ? parsed.goal_class : null;\n if (!goal_class || !VALID_GOAL_CLASSES.has(goal_class)) return null;\n\n const freshness_window = typeof parsed.freshness_window === 'string' ? parsed.freshness_window : null;\n if (!freshness_window || !VALID_FRESHNESS.has(freshness_window)) return null;\n\n const primary_branch = parsed.primary_branch;\n if (typeof primary_branch !== 'string' || !VALID_BRANCHES.has(primary_branch as PrimaryBranch)) return null;\n\n if (!isStepArray(parsed.first_call_sequence) || parsed.first_call_sequence.length === 0) return null;\n if (!isStringArray(parsed.keyword_seeds) || parsed.keyword_seeds.length === 0) return null;\n\n return {\n goal_class,\n goal_class_reason: typeof parsed.goal_class_reason === 'string' ? 
parsed.goal_class_reason : '',\n primary_branch: primary_branch as PrimaryBranch,\n primary_branch_reason: typeof parsed.primary_branch_reason === 'string' ? parsed.primary_branch_reason : '',\n freshness_window,\n first_call_sequence: parsed.first_call_sequence,\n keyword_seeds: parsed.keyword_seeds.filter((s) => s.trim().length > 0),\n iteration_hints: isStringArray(parsed.iteration_hints) ? parsed.iteration_hints : [],\n gaps_to_watch: isStringArray(parsed.gaps_to_watch) ? parsed.gaps_to_watch : [],\n stop_criteria: isStringArray(parsed.stop_criteria) ? parsed.stop_criteria : [],\n };\n } catch {\n return null;\n }\n}\n\nexport async function generateResearchBrief(\n goal: string,\n processor: OpenAI,\n signal?: AbortSignal,\n): Promise<ResearchBrief | null> {\n const today = new Date().toISOString().slice(0, 10);\n\n const prompt = `You are a research planner. An agent is about to run a multi-pass research loop on the goal below using 3 tools:\n\n - web-search: fan-out Google, scope: web|reddit|both, up to 50 queries per call, parallel-callable (multiple calls per turn)\n - scrape-links: fetch URLs in parallel, auto-detects reddit.com post permalinks \u2192 Reddit API (threaded post+comments); all other URLs \u2192 HTTP scraper; parallel-callable\n\nProduce a tailored JSON brief.\n\nGOAL: ${goal}\nTODAY: ${today}\n\nReturn ONLY a JSON object (no markdown, no code fences):\n\n{\n \"goal_class\": \"spec | bug | migration | sentiment | pricing | security | synthesis | product_launch | other\",\n \"goal_class_reason\": \"one sentence \u2014 why this class\",\n \"primary_branch\": \"reddit | web | both\",\n \"primary_branch_reason\": \"one sentence \u2014 why this branch leads\",\n \"freshness_window\": \"days | weeks | months | years\",\n \"first_call_sequence\": [\n { \"tool\": \"web-search | scrape-links\", \"reason\": \"what this call establishes for the agent\" }\n ],\n \"keyword_seeds\": [\"25\u201350 concrete Google queries \u2014 flat list, to be fired in 
the first web-search call\"],\n \"iteration_hints\": [\"2\u20135 pointers on which harvested terms / follow-up signals to watch for after pass 1\"],\n \"gaps_to_watch\": [\"2\u20135 concrete questions the agent MUST verify or the answer is incomplete\"],\n \"stop_criteria\": [\"2\u20134 checkable conditions \u2014 all must hold before the agent declares done\"]\n}\n\nRULES:\n\nprimary_branch:\n- \"reddit\" \u2192 sentiment / migration / lived-experience / community-consensus goals. Leads with scope:\"reddit\" web-search.\n- \"web\" \u2192 spec / bug / pricing / CVE / API / primary-source goals. Leads with scope:\"web\" web-search.\n- \"both\" \u2192 opinion-heavy AND needs official sources (e.g. product launch + practitioner reception).\n\nfirst_call_sequence:\n- 1\u20133 steps.\n- reddit-first: step 1 = web-search (caller sets scope:\"reddit\"), step 2 = scrape-links on best post permalinks.\n- web-first: step 1 = web-search (scope:\"web\"), step 2 = scrape-links on HIGHLY_RELEVANT URLs.\n- both: step 1 = two parallel web-search calls (one scope:\"reddit\", one scope:\"web\"), step 2 = merged scrape-links.\n\nkeyword_seeds:\n- 25\u201350 total. Narrow bug \u2192 fewer. Open synthesis \u2192 more.\n- Use operators where helpful (site:, quotes, verbatim version numbers).\n- DIVERSE facets \u2014 same noun-phrase cannot repeat across seeds with adjectives-only variation.\n- Do NOT invent vendor names you are uncertain exist.\n\nfreshness_window:\n- If the goal mentions a recent release / date / version, use \"days\" or \"weeks\".\n- Stable protocols / APIs \u2192 \"months\" or \"years\".`;\n\n try {\n const response = await requestText(\n processor,\n prompt,\n 'Research brief generation',\n signal,\n );\n\n if (!response.content) {\n mcpLog('warning', `Research brief generation returned no content: ${response.error ?? 'unknown'}`, 'llm');\n markLLMFailure('planner', response.error ?? 
'empty response');\n return null;\n }\n\n const brief = parseResearchBrief(response.content);\n if (!brief) {\n mcpLog('warning', 'Research brief JSON parse or shape validation failed', 'llm');\n markLLMFailure('planner', 'brief parse/validation failed');\n return null;\n }\n\n markLLMSuccess('planner');\n return brief;\n } catch (err: unknown) {\n const message = err instanceof Error ? err.message : String(err);\n mcpLog('warning', `Research brief generation failed: ${message}`, 'llm');\n markLLMFailure('planner', message);\n return null;\n }\n}\n\nexport function renderResearchBrief(brief: ResearchBrief): string {\n const lines: string[] = [];\n\n lines.push('## Your research brief (goal-tailored)');\n lines.push('');\n lines.push(`**Goal class**: \\`${brief.goal_class}\\` \u2014 ${brief.goal_class_reason}`);\n lines.push(`**Primary branch**: \\`${brief.primary_branch}\\` \u2014 ${brief.primary_branch_reason}`);\n lines.push(`**Freshness**: \\`${brief.freshness_window}\\``);\n lines.push('');\n\n if (brief.first_call_sequence.length > 0) {\n lines.push('### First-call sequence');\n brief.first_call_sequence.forEach((step, i) => {\n lines.push(`${i + 1}. 
\\`${step.tool}\\` \u2014 ${step.reason}`);\n });\n lines.push('');\n }\n\n if (brief.keyword_seeds.length > 0) {\n lines.push(`### Keyword seeds (${brief.keyword_seeds.length}) \u2014 fire these in your first \\`web-search\\` call as a flat \\`queries\\` array`);\n for (const seed of brief.keyword_seeds) {\n lines.push(`- ${seed}`);\n }\n lines.push('');\n }\n\n if (brief.iteration_hints.length > 0) {\n lines.push('### Iteration hints (harvest new terms from scrape extracts\\' `## Follow-up signals`)');\n for (const hint of brief.iteration_hints) lines.push(`- ${hint}`);\n lines.push('');\n }\n\n if (brief.gaps_to_watch.length > 0) {\n lines.push('### Gaps to watch');\n for (const gap of brief.gaps_to_watch) lines.push(`- ${gap}`);\n lines.push('');\n }\n\n if (brief.stop_criteria.length > 0) {\n lines.push('### Stop criteria');\n for (const c of brief.stop_criteria) lines.push(`- ${c}`);\n lines.push('');\n }\n\n lines.push('---');\n lines.push('');\n lines.push('Fire `first_call_sequence` now. After each `scrape-links`, harvest new terms from `## Follow-up signals` and build your next `web-search` round. Stop when every gap is closed.');\n\n return lines.join('\\n');\n}\n"],
5
+ "mappings": "AAMA,OAAO,YAAY;AACnB,SAAS,gBAAgB,uBAAuB;AAChD;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAEK;AACP,SAAS,cAAc;AAGvB,MAAM,sBAAsB;AAG5B,MAAM,wBAAwB;AAG9B,MAAM,wBAAwB;AAG9B,MAAM,uBAAuB;AAG7B,MAAM,0BAA0B;AAuBhC,MAAM,YAAY;AAAA,EAChB,eAAe;AAAA,EACf,iBAAiB;AAAA,EACjB,sBAAsB;AAAA,EACtB,wBAAwB;AAAA,EACxB,kBAAkB;AAAA,EAClB,oBAAoB;AAAA,EACpB,4BAA4B;AAAA,EAC5B,8BAA8B;AAChC;AAEO,SAAS,eAAe,MAA2B;AACxD,QAAM,MAAK,oBAAI,KAAK,GAAE,YAAY;AAClC,MAAI,SAAS,WAAW;AACtB,cAAU,gBAAgB;AAC1B,cAAU,uBAAuB;AACjC,cAAU,mBAAmB;AAC7B,cAAU,6BAA6B;AAAA,EACzC,OAAO;AACL,cAAU,kBAAkB;AAC5B,cAAU,yBAAyB;AACnC,cAAU,qBAAqB;AAC/B,cAAU,+BAA+B;AAAA,EAC3C;AACF;AAEO,SAAS,eAAe,MAAqB,KAAoB;AACtE,QAAM,MAAK,oBAAI,KAAK,GAAE,YAAY;AAClC,QAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,OAAO,eAAe;AAClF,MAAI,SAAS,WAAW;AACtB,cAAU,gBAAgB;AAC1B,cAAU,uBAAuB;AACjC,cAAU,mBAAmB;AAC7B,cAAU,8BAA8B;AAAA,EAC1C,OAAO;AACL,cAAU,kBAAkB;AAC5B,cAAU,yBAAyB;AACnC,cAAU,qBAAqB;AAC/B,cAAU,gCAAgC;AAAA,EAC5C;AACF;AAEO,SAAS,eAAkC;AAChD,QAAM,MAAM,gBAAgB;AAC5B,SAAO;AAAA,IACL,eAAe,UAAU;AAAA,IACzB,iBAAiB,UAAU;AAAA,IAC3B,sBAAsB,UAAU;AAAA,IAChC,wBAAwB,UAAU;AAAA,IAClC,kBAAkB,UAAU;AAAA,IAC5B,oBAAoB,UAAU;AAAA;AAAA;AAAA,IAG9B,mBAAmB,IAAI;AAAA,IACvB,qBAAqB,IAAI;AAAA,IACzB,4BAA4B,UAAU;AAAA,IACtC,8BAA8B,UAAU;AAAA,EAC1C;AACF;AAGO,SAAS,0BAAgC;AAC9C,YAAU,gBAAgB;AAC1B,YAAU,kBAAkB;AAC5B,YAAU,uBAAuB;AACjC,YAAU,yBAAyB;AACnC,YAAU,mBAAmB;AAC7B,YAAU,qBAAqB;AAC/B,YAAU,6BAA6B;AACvC,YAAU,+BAA+B;AAC3C;AAgBA,MAAM,mBAAmB;AAAA,EACvB,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,YAAY;AACd;AAGA,MAAM,4BAA4B,oBAAI,IAAI;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGD,SAAS,UAAU,OAA6C;AAC9D,SACE,OAAO,UAAU,YACjB,UAAU,QACV,YAAY,SACZ,OAAQ,MAAkC,WAAW;AAEzD;AAEA,IAAI,YAA2B;AAIxB,SAAS,qBAAoC;AAClD,MAAI,CAAC,gBAAgB,EAAE,cAAe,QAAO;AAE7C,MAAI,CAAC,WAAW;AACd,gBAAY,IAAI,OAAO;AAAA,MACrB,SAAS,eAAe;AAAA,MACxB,QAAQ,eAAe;AAAA,MACvB,SAAS;AAAA,MACT,YAAY;AAAA,MACZ,gBAAgB,EAAE,WAAW,yBAAyB;AAAA,IACxD,CAAC;AACD,WAAO,QAAQ,qCAAqC,eAAe,KAAK,cAAc,eAAe,QAAQ,KAAK,KAAK;AAAA,EACzH;AACA,SAAO;AACT;AAEA,SAAS,qBAAqB,OAAe,QAAyC
;AACpF,QAAM,cAAuC;AAAA,IAC3C;AAAA,IACA,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,OAAO,CAAC;AAAA,EAC9C;AAEA,MAAI,eAAe,qBAAqB,QAAQ;AAC9C,gBAAY,mBAAmB,eAAe;AAAA,EAChD;AAEA,SAAO;AACT;AAEA,eAAsB,YACpB,WACA,QACA,gBACA,QACoE;AACpE,QAAM,QAAQ,eAAe;AAE7B,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB,CAAC,gBAAgB,UAAU,KAAK,YAAY;AAAA,QAC1C,qBAAqB,OAAO,MAAM;AAAA,QAClC;AAAA,UACE,QAAQ,SAAS,YAAY,IAAI,CAAC,aAAa,MAAM,CAAC,IAAI;AAAA,UAC1D,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MACA;AAAA,MACA;AAAA,MACA,GAAG,cAAc,KAAK,KAAK;AAAA,IAC7B;AAEA,UAAM,UAAU,SAAS,UAAU,CAAC,GAAG,SAAS,SAAS,KAAK;AAC9D,QAAI,SAAS;AACX,aAAO,EAAE,SAAS,MAAM;AAAA,IAC1B;AAEA,UAAM,MAAM,6BAA6B,KAAK;AAC9C,WAAO,WAAW,GAAG,cAAc,qCAAqC,KAAK,IAAI,KAAK;AACtF,WAAO,EAAE,SAAS,MAAM,OAAO,OAAO,IAAI;AAAA,EAC5C,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,WAAW,GAAG,cAAc,qBAAqB,KAAK,KAAK,OAAO,IAAI,KAAK;AAClF,WAAO,EAAE,SAAS,MAAM,OAAO,OAAO,QAAQ;AAAA,EAChD;AACF;AAKA,SAAS,oBAAoB,OAAyB;AACpD,MAAI,CAAC,SAAS,OAAO,UAAU,SAAU,QAAO;AAGhD,QAAM,YAAa,OAA6B;AAChD,MAAI,cAAc,cAAc,cAAc,aAAa;AACzD,WAAO;AAAA,EACT;AAGA,MAAI,UAAU,KAAK,GAAG;AACpB,QAAI,MAAM,WAAW,OAAO,MAAM,WAAW,OAAO,MAAM,WAAW,OAAO,MAAM,WAAW,OAAO,MAAM,WAAW,KAAK;AACxH,aAAO;AAAA,IACT;AAAA,EACF;AAGA,QAAM,SAAS;AACf,QAAM,OAAO,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO;AAC7D,QAAM,SACJ,OAAO,OAAO,UAAU,YAAY,OAAO,UAAU,OAChD,OAAO,QACR;AACN,QAAM,YACJ,SACC,UAAU,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO,YAC1D,UAAU,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO;AAC7D,MAAI,aAAa,0BAA0B,IAAI,SAAS,GAAG;AACzD,WAAO;AAAA,EACT;AAGA,QAAM,UAAU,OAAO,OAAO,YAAY,WAAW,OAAO,QAAQ,YAAY,IAAI;AACpF,MACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,SAAS,KAC1B,QAAQ,SAAS,WAAW,KAC5B,QAAQ,SAAS,qBAAqB,KACtC,QAAQ,SAAS,cAAc,KAC/B,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,YAAY,GAC7B;AACA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAKA,SAAS,oBAAoB,SAAyB;AACpD,QAAM,mBAAmB,iBAAiB,cAAc,KAAK,IAAI,GAAG,OAAO;AAC3E,QAAM,SAAS,KAAK,OAAO,IAAI,wBAAwB;AACvD,SAAO,KAAK,IAAI,mBAAmB,QAAQ,iBAAiB,UAAU;AACxE;AAOA,eAAsB,sBACpB,SACA,QACA,WACA,QACoB;AAEpB,MAAI,CAAC,OAAO,SAAS;AACnB,WAAO,EAAE,SAAS,WAAW,MAAM;AAAA,EACrC;AAEA,MAAI,CAAC,WAAW;AACd,WAAO;AAAA,MA
CL;AAAA,MACA,WAAW;AAAA,MACX,OAAO;AAAA,MACP,cAAc;AAAA,QACZ,MAAM,UAAU;AAAA,QAChB,SAAS;AAAA,QACT,WAAW;AAAA,MACb;AAAA,IACF;AAAA,EACF;AAEA,MAAI,CAAC,SAAS,KAAK,GAAG;AACpB,WAAO,EAAE,SAAS,WAAW,IAAI,WAAW,OAAO,OAAO,yBAAyB;AAAA,EACrF;AAGA,QAAM,mBAAmB,QAAQ,SAAS,sBACtC,QAAQ,UAAU,GAAG,mBAAmB,IAAI,0CAC5C;AAKJ,QAAM,WAAW,MAAM;AACrB,QAAI,CAAC,OAAO,IAAK,QAAO;AACxB,QAAI;AACF,YAAM,IAAI,IAAI,IAAI,OAAO,GAAG;AAC5B,aAAO,GAAG,EAAE,MAAM,GAAG,EAAE,QAAQ;AAAA,IACjC,QAAQ;AACN,aAAO;AAAA,IACT;AAAA,EACF,GAAG;AACH,QAAM,UAAU,UAAU,aAAa,OAAO;AAAA;AAAA,IAAS;AAEvD,QAAM,SAAS,OAAO,UAClB;AAAA;AAAA,EAEJ,OAAO,2BAA2B,OAAO,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsDhD,gBAAgB,KACZ;AAAA;AAAA,EAEJ,OAAO;AAAA,EACP,gBAAgB;AAEhB,MAAI;AAGJ,WAAS,UAAU,GAAG,WAAW,iBAAiB,YAAY,WAAW;AACvE,QAAI;AACF,UAAI,YAAY,GAAG;AACjB,eAAO,QAAQ,4BAA4B,eAAe,KAAK,IAAI,KAAK;AAAA,MAC1E,OAAO;AACL,eAAO,WAAW,iBAAiB,OAAO,IAAI,iBAAiB,UAAU,IAAI,KAAK;AAAA,MACpF;AAEA,YAAM,WAAW,MAAM;AAAA,QACrB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAEA,UAAI,SAAS,SAAS;AACpB,eAAO,QAAQ,0BAA0B,SAAS,QAAQ,MAAM,eAAe,KAAK;AACpF,uBAAe,WAAW;AAC1B,eAAO,EAAE,SAAS,SAAS,SAAS,WAAW,KAAK;AAAA,MACtD;AAGA,aAAO,WAAW,oCAAoC,KAAK;AAC3D,qBAAe,aAAa,6BAA6B;AACzD,aAAO;AAAA,QACL;AAAA,QACA,WAAW;AAAA,QACX,OAAO;AAAA,QACP,cAAc;AAAA,UACZ,MAAM,UAAU;AAAA,UAChB,SAAS;AAAA,UACT,WAAW;AAAA,QACb;AAAA,MACF;AAAA,IAEF,SAAS,KAAc;AACrB,kBAAY,cAAc,GAAG;AAG7B,YAAM,SAAS,UAAU,GAAG,IAAI,IAAI,SAAS;AAC7C,YAAM,OAAO,OAAO,QAAQ,YAAY,QAAQ,QAAQ,UAAU,MAC9D,OAAQ,IAAgC,IAAI,IAC5C;AACJ,aAAO,SAAS,kBAAkB,UAAU,CAAC,MAAM,UAAU,OAAO,YAAY,MAAM,UAAU,IAAI,eAAe,oBAAoB,GAAG,CAAC,KAAK,KAAK;AAGrJ,UAAI,oBAAoB,GAAG,KAAK,UAAU,iBAAiB,YAAY;AACrE,cAAM,UAAU,oBAAoB,OAAO;AAC3C,eAAO,WAAW,eAAe,OAAO,SAAS,KAAK;AACtD,YAAI;AAAE,gBAAM,MAAM,SAAS,MAAM;AAAA,QAAG,QAAQ;AAAE;AAAA,QAAO;AACrD;AAAA,MACF;AAGA;AAAA,IACF;AAAA,EACF;AAGA,QAA
M,eAAe,WAAW,WAAW;AAC3C,SAAO,SAAS,wBAAwB,YAAY,iCAAiC,KAAK;AAC1F,iBAAe,aAAa,YAAY;AAExC,SAAO;AAAA,IACL;AAAA;AAAA,IACA,WAAW;AAAA,IACX,OAAO,0BAA0B,YAAY;AAAA,IAC7C,cAAc,aAAa;AAAA,MACzB,MAAM,UAAU;AAAA,MAChB,SAAS;AAAA,MACT,WAAW;AAAA,IACb;AAAA,EACF;AACF;AAOA,MAAM,0BAA0B;AA2ChC,eAAsB,sBACpB,YAQA,WACA,cACA,WACA,kBAAqC,CAAC,GAC4B;AAClE,QAAM,iBAAiB,WAAW,MAAM,GAAG,uBAAuB;AAMlE,QAAM,iBAAiB,CAAC,IAAI,IAAI,IAAI,IAAI,GAAG,GAAG,GAAG,GAAG,GAAG,CAAC;AACxD,QAAM,gBAAgB,CAAC,SAAyB,eAAe,OAAO,CAAC,KAAK;AAG5E,QAAM,QAAkB,CAAC;AACzB,aAAW,OAAO,gBAAgB;AAChC,QAAI;AACJ,QAAI;AACF,eAAS,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,IACzD,QAAQ;AACN,eAAS,IAAI;AAAA,IACf;AACA,UAAM,UAAU,IAAI,QAAQ,SAAS,MACjC,IAAI,QAAQ,MAAM,GAAG,GAAG,IAAI,QAC5B,IAAI;AACR,UAAM,KAAK,IAAI,IAAI,IAAI,OAAO,cAAc,IAAI,IAAI,CAAC,IAAI,IAAI,KAAK,WAAM,MAAM,WAAM,OAAO,EAAE;AAAA,EAC/F;AAEA,QAAM,mBAAmB,gBAAgB,SAAS,IAC9C,gBAAgB,IAAI,CAAC,MAAM,KAAK,CAAC,EAAE,EAAE,KAAK,IAAI,IAC9C;AACJ,QAAM,SAAQ,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE;AAElD,QAAM,SAAS;AAAA;AAAA,aAEJ,SAAS;AAAA,SACb,KAAK;AAAA;AAAA;AAAA,EAGZ,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAmDD,eAAe,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAMpB,eAAe,MAAM,cAAc,YAAY;AAAA,EAC/D,MAAM,KAAK,IAAI,CAAC;AAEhB,MAAI;AACF,WAAO,QAAQ,eAAe,eAAe,MAAM,2BAA2B,KAAK;AAEnF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,QAAI,CAAC,SAAS,SAAS;AACrB,YAAM,SAAS,SAAS,SAAS;AACjC,qBAAe,WAAW,MAAM;AAChC,aAAO,EAAE,QAAQ,MAAM,OAAO,OAAO;AAAA,IACvC;AAGA,UAAM,UAAU,SAAS,QAAQ,QAAQ,wBAAwB,EAAE,EAAE,QAAQ,eAAe,EAAE,EAAE,KAAK;AACrG,UAAM,SAAS,KAAK,MAAM,OAAO;AAKjC,QAAI,CAAC,OAAO,SAAS,OAAO,OAAO,cAAc,YAAY,CAAC,MAAM,QAAQ,OAAO,OAAO,GAAG;AAC3F,YAAM,SAAS;AACf,qBAAe,WAAW,MAAM;AAChC,aAAO,EAAE,QAAQ,MAAM,OAAO,OAAO;AAAA,IACvC;AAEA,WAAO,QAAQ,4BAA4B,OAAO,QAAQ,OAAO,OAAK,EAAE,SAAS,iBAAiB,EAAE,MAAM,oBAAoB,KAAK;AACnI,m
BAAe,SAAS;AACxB,WAAO,EAAE,QAAQ,OAAO;AAAA,EAC1B,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,SAAS,0BAA0B,OAAO,IAAI,KAAK;AAC1D,mBAAe,WAAW,OAAO;AACjC,WAAO,EAAE,QAAQ,MAAM,OAAO,0BAA0B,OAAO,GAAG;AAAA,EACpE;AACF;AAEA,eAAsB,+BACpB,YAKA,WACA,iBACA,WAC8D;AAC9D,QAAM,kBAAkB,WAAW,MAAM,GAAG,EAAE;AAC9C,QAAM,QAAQ,gBAAgB,IAAI,CAAC,QAAQ;AACzC,QAAI;AACJ,QAAI;AACF,eAAS,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,IACzD,QAAQ;AACN,eAAS,IAAI;AAAA,IACf;AACA,WAAO,IAAI,IAAI,IAAI,KAAK,IAAI,KAAK,WAAM,MAAM;AAAA,EAC/C,CAAC;AAED,QAAM,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,aASJ,SAAS;AAAA;AAAA;AAAA,EAGpB,gBAAgB,IAAI,CAAC,UAAU,KAAK,KAAK,EAAE,EAAE,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA,EAGvD,MAAM,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAShB,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,QAAI,CAAC,SAAS,SAAS;AACrB,YAAM,SAAS,SAAS,SAAS;AACjC,qBAAe,WAAW,MAAM;AAChC,aAAO,EAAE,QAAQ,CAAC,GAAG,OAAO,OAAO;AAAA,IACrC;AAEA,UAAM,UAAU,SAAS,QAAQ,QAAQ,wBAAwB,EAAE,EAAE,QAAQ,eAAe,EAAE,EAAE,KAAK;AACrG,UAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,mBAAe,SAAS;AACxB,WAAO,EAAE,QAAQ,MAAM,QAAQ,OAAO,cAAc,IAAI,OAAO,iBAAiB,CAAC,EAAE;AAAA,EACrF,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,SAAS,4CAA4C,OAAO,IAAI,KAAK;AAC5E,mBAAe,WAAW,OAAO;AACjC,WAAO,EAAE,QAAQ,CAAC,GAAG,OAAO,QAAQ;AAAA,EACtC;AACF;AA0BA,MAAM,qBAAqB,oBAAI,IAAI;AAAA,EACjC;AAAA,EAAQ;AAAA,EAAO;AAAA,EAAa;AAAA,EAAa;AAAA,EAAW;AAAA,EACpD;AAAA,EAAa;AAAA,EAAkB;AACjC,CAAC;AAED,MAAM,kBAAkB,oBAAI,IAAI,CAAC,QAAQ,SAAS,UAAU,OAAO,CAAC;AACpE,MAAM,iBAAiB,oBAAI,IAAmB,CAAC,UAAU,OAAO,MAAM,CAAC;AACvE,MAAM,mBAAmB,oBAAI,IAAI,CAAC,cAAc,cAAc,CAAC;AAE/D,SAAS,cAAc,OAAmC;AACxD,SAAO,MAAM,QAAQ,KAAK,KAAK,MAAM,MAAM,CAAC,MAAM,OAAO,MAAM,QAAQ;AACzE;AAEA,SAAS,YAAY,OAA8C;AACjE,SAAO,MAAM,QAAQ,KAAK,KAAK,MAAM,MAAM,CAAC,MAAM;AAChD,QAAI,OAAO,MAAM,YAAY,MAAM,KAAM,QAAO;AAChD,UAAM,OAAQ,EAA8B;AAC5C,UAAM,SAAU,EAA8B;AAC9C,WAAO,OAAO,SAAS,YAClB,iBAAiB,IAAI,IAAI,KACzB,OAAO,WAAW,YAClB,OAAO,KAAK,EAAE,SAAS;AAAA,EAC9B,CAAC;AACH;AAEO,SAAS,mBAAmB,KAAmC;AACpE
,MAAI;AACF,UAAM,UAAU,IAAI,QAAQ,wBAAwB,EAAE,EAAE,QAAQ,eAAe,EAAE,EAAE,KAAK;AACxF,UAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,UAAM,aAAa,OAAO,OAAO,eAAe,WAAW,OAAO,aAAa;AAC/E,QAAI,CAAC,cAAc,CAAC,mBAAmB,IAAI,UAAU,EAAG,QAAO;AAE/D,UAAM,mBAAmB,OAAO,OAAO,qBAAqB,WAAW,OAAO,mBAAmB;AACjG,QAAI,CAAC,oBAAoB,CAAC,gBAAgB,IAAI,gBAAgB,EAAG,QAAO;AAExE,UAAM,iBAAiB,OAAO;AAC9B,QAAI,OAAO,mBAAmB,YAAY,CAAC,eAAe,IAAI,cAA+B,EAAG,QAAO;AAEvG,QAAI,CAAC,YAAY,OAAO,mBAAmB,KAAK,OAAO,oBAAoB,WAAW,EAAG,QAAO;AAChG,QAAI,CAAC,cAAc,OAAO,aAAa,KAAK,OAAO,cAAc,WAAW,EAAG,QAAO;AAEtF,WAAO;AAAA,MACL;AAAA,MACA,mBAAmB,OAAO,OAAO,sBAAsB,WAAW,OAAO,oBAAoB;AAAA,MAC7F;AAAA,MACA,uBAAuB,OAAO,OAAO,0BAA0B,WAAW,OAAO,wBAAwB;AAAA,MACzG;AAAA,MACA,qBAAqB,OAAO;AAAA,MAC5B,eAAe,OAAO,cAAc,OAAO,CAAC,MAAM,EAAE,KAAK,EAAE,SAAS,CAAC;AAAA,MACrE,iBAAiB,cAAc,OAAO,eAAe,IAAI,OAAO,kBAAkB,CAAC;AAAA,MACnF,eAAe,cAAc,OAAO,aAAa,IAAI,OAAO,gBAAgB,CAAC;AAAA,MAC7E,eAAe,cAAc,OAAO,aAAa,IAAI,OAAO,gBAAgB,CAAC;AAAA,IAC/E;AAAA,EACF,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAEA,eAAsB,sBACpB,MACA,WACA,QAC+B;AAC/B,QAAM,SAAQ,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE;AAElD,QAAM,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAOT,IAAI;AAAA,SACH,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA0CZ,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,QAAI,CAAC,SAAS,SAAS;AACrB,aAAO,WAAW,kDAAkD,SAAS,SAAS,SAAS,IAAI,KAAK;AACxG,qBAAe,WAAW,SAAS,SAAS,gBAAgB;AAC5D,aAAO;AAAA,IACT;AAEA,UAAM,QAAQ,mBAAmB,SAAS,OAAO;AACjD,QAAI,CAAC,OAAO;AACV,aAAO,WAAW,wDAAwD,KAAK;AAC/E,qBAAe,WAAW,+BAA+B;AACzD,aAAO;AAAA,IACT;AAEA,mBAAe,SAAS;AACxB,WAAO;AAAA,EACT,SAAS,KAAc;AACrB,UAAM,UAAU,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC/D,WAAO,WAAW,qCAAqC,OAAO,IAAI,KAAK;AACvE,mBAAe,WAAW,OAAO;AACjC,WAAO;AAAA,EACT;AACF;AAEO,SAAS,oBAAoB,OAA8B;AAChE,QAAM,QAAkB,CAAC;AAEzB,QAAM,KAAK,wCAAwC;AACnD,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,qBAAqB,MAAM,UAAU,aAAQ,MAAM,iBAAiB,EAAE;AACjF,QAAM,
KAAK,yBAAyB,MAAM,cAAc,aAAQ,MAAM,qBAAqB,EAAE;AAC7F,QAAM,KAAK,oBAAoB,MAAM,gBAAgB,IAAI;AACzD,QAAM,KAAK,EAAE;AAEb,MAAI,MAAM,oBAAoB,SAAS,GAAG;AACxC,UAAM,KAAK,yBAAyB;AACpC,UAAM,oBAAoB,QAAQ,CAAC,MAAM,MAAM;AAC7C,YAAM,KAAK,GAAG,IAAI,CAAC,OAAO,KAAK,IAAI,aAAQ,KAAK,MAAM,EAAE;AAAA,IAC1D,CAAC;AACD,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,cAAc,SAAS,GAAG;AAClC,UAAM,KAAK,sBAAsB,MAAM,cAAc,MAAM,mFAA8E;AACzI,eAAW,QAAQ,MAAM,eAAe;AACtC,YAAM,KAAK,KAAK,IAAI,EAAE;AAAA,IACxB;AACA,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,gBAAgB,SAAS,GAAG;AACpC,UAAM,KAAK,sFAAuF;AAClG,eAAW,QAAQ,MAAM,gBAAiB,OAAM,KAAK,KAAK,IAAI,EAAE;AAChE,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,cAAc,SAAS,GAAG;AAClC,UAAM,KAAK,mBAAmB;AAC9B,eAAW,OAAO,MAAM,cAAe,OAAM,KAAK,KAAK,GAAG,EAAE;AAC5D,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,MAAI,MAAM,cAAc,SAAS,GAAG;AAClC,UAAM,KAAK,mBAAmB;AAC9B,eAAW,KAAK,MAAM,cAAe,OAAM,KAAK,KAAK,CAAC,EAAE;AACxD,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,QAAM,KAAK,KAAK;AAChB,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,iLAAiL;AAE5L,SAAO,MAAM,KAAK,IAAI;AACxB;",
6
6
  "names": []
7
7
  }
@@ -93,6 +93,24 @@ function buildDegradedStub(goal) {
93
93
  ].join("\n");
94
94
  }
95
95
  const buildOrientation = buildStaticScaffolding;
96
+ const PLANNER_FAILURE_THRESHOLD = 2;
97
+ const PLANNER_FAILURE_TTL_MS = 6e4;
98
+ function isPlannerKnownOffline(health, nowMs = Date.now()) {
99
+ if (!health.plannerConfigured) {
100
+ return true;
101
+ }
102
+ if (health.consecutivePlannerFailures < PLANNER_FAILURE_THRESHOLD) {
103
+ return false;
104
+ }
105
+ if (health.lastPlannerCheckedAt === null) {
106
+ return false;
107
+ }
108
+ const lastMs = Date.parse(health.lastPlannerCheckedAt);
109
+ if (Number.isNaN(lastMs)) {
110
+ return false;
111
+ }
112
+ return nowMs - lastMs < PLANNER_FAILURE_TTL_MS;
113
+ }
96
114
  async function buildGoalAwareBrief(goal, signal) {
97
115
  const processor = createLLMProcessor();
98
116
  if (!processor) {
@@ -109,7 +127,7 @@ async function buildGoalAwareBrief(goal, signal) {
109
127
  async function handleStartResearch(params, signal) {
110
128
  try {
111
129
  const llmHealth = getLLMHealth();
112
- const plannerKnownOffline = !llmHealth.plannerConfigured || llmHealth.lastPlannerCheckedAt !== null && !llmHealth.lastPlannerOk;
130
+ const plannerKnownOffline = isPlannerKnownOffline(llmHealth);
113
131
  if (plannerKnownOffline && !params.include_playbook) {
114
132
  const stub = buildDegradedStub(params.goal);
115
133
  return toolSuccess(stub, { content: stub });
@@ -161,9 +179,12 @@ function registerStartResearchTool(server) {
161
179
  );
162
180
  }
163
181
  export {
182
+ PLANNER_FAILURE_THRESHOLD,
183
+ PLANNER_FAILURE_TTL_MS,
164
184
  buildDegradedStub,
165
185
  buildOrientation,
166
186
  buildStaticScaffolding,
187
+ isPlannerKnownOffline,
167
188
  registerStartResearchTool
168
189
  };
169
190
  //# sourceMappingURL=start-research.js.map
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "version": 3,
3
3
  "sources": ["../../../src/tools/start-research.ts"],
4
- "sourcesContent": ["import type { MCPServer } from 'mcp-use/server';\n\nimport {\n startResearchOutputSchema,\n startResearchParamsSchema,\n type StartResearchOutput,\n type StartResearchParams,\n} from '../schemas/start-research.js';\nimport {\n createLLMProcessor,\n generateResearchBrief,\n getLLMHealth,\n renderResearchBrief,\n} from '../services/llm-processor.js';\nimport { classifyError } from '../utils/errors.js';\nimport { mcpLog } from '../utils/logger.js';\nimport { toolFailure, toolSuccess, toToolResponse, type ToolExecutionResult } from './mcp-helpers.js';\nimport { formatError } from './utils.js';\n\nconst SKILL_INSTALL_HINT = [\n '> \uD83D\uDCA1 **Pair this server with the `run-research` skill** for the full agentic playbook',\n '> (single-agent loop, multi-agent orchestrator, mission-prompt templates, output discipline).',\n '> Install once per machine \u2014 the skill is what teaches the agent how to spend these tools well:',\n '>',\n '> ```bash',\n '> npx -y skills add -y -g yigitkonur/skills-by-yigitkonur/skills/run-research',\n '> ```',\n '>',\n '> Already installed? Skip this \u2014 the skill auto-loads on relevant prompts. The full pack',\n '> ships ~50 sibling skills: `npx -y skills add -y -g yigitkonur/skills-by-yigitkonur`.',\n].join('\\n');\n\n/**\n * Full research-loop playbook. Teaches the 3-tool mental model\n * (start-research, web-search, scrape-links), the aggressive multi-call\n * discipline, parallel-callability, and the cite-from-scrape rule.\n *\n * Emitted when the LLM planner is healthy OR `include_playbook: true`.\n */\nexport function buildStaticScaffolding(goal?: string, opts: { plannerAvailable?: boolean } = {}): string {\n const plannerAvailable = opts.plannerAvailable ?? true;\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not yet specified \u2014 set one on the next pass';\n\n const classifierLoopStep = plannerAvailable\n ? '3. 
Read the classifier output: `synthesis` (citations in `[rank]`), `gaps[]` (with ids), `refine_queries[]` (follow-ups tied to gap ids).'\n : '3. Classifier output is NOT available (LLM planner offline). `web-search` returns a raw ranked list \u2014 synthesize the terrain yourself from titles + snippets.';\n\n return [\n '# Research session started',\n '',\n SKILL_INSTALL_HINT,\n '',\n focusLine,\n '',\n 'You are running a research LOOP, not answering from memory. Training data is stale; the web is authoritative for anything dated, versioned, priced, or contested. Every non-trivial claim in your final answer must be traceable to a `scrape-links` excerpt you read. Never cite a URL from a `web-search` snippet alone.',\n '',\n '## The 3 tools',\n '',\n '**1. `start-research`** \u2014 you just called me. I plan this session and return the brief below. Call me again only if the goal materially shifts.',\n '',\n '**2. `web-search`** \u2014 fan out Google queries in parallel. One call carries **up to 50 queries** in a flat `queries` array. Call me **aggressively** \u2014 2\u20134 rounds per session is normal, not 1. After each pass, read `gaps[]` and `refine_queries[]` and fire another round with the harvested terms. **Parallel-safe**: run multiple `web-search` calls in the same turn for orthogonal subtopics (e.g. one call for \"spec\" queries, one call for \"sentiment\" queries). `scope` values:',\n '- `\"reddit\"` \u2192 server appends `site:reddit.com` and filters to post permalinks. Use for sentiment / migration / lived experience.',\n '- `\"web\"` (default) \u2192 open web. Use for spec / bug / pricing / CVE / API / primary-source hunts.',\n '- `\"both\"` \u2192 fans each query across both. Use when the topic is opinion-heavy AND needs official sources.',\n '',\n '**3. `scrape-links`** \u2014 fetch URLs in parallel and run per-URL LLM extraction. 
**Auto-detects** `reddit.com/r/.../comments/` permalinks and routes them through the Reddit API (threaded post + comments); everything else flows through the HTTP scraper. Mix Reddit + web URLs freely \u2014 both branches run concurrently. **Parallel-safe**: prefer multiple `scrape-links` calls with contextually grouped URLs over one giant mixed batch. Each page returns `## Source`, `## Matches` (verbatim facts), `## Not found` (explicit gaps this page did NOT answer), `## Follow-up signals` (new terms + referenced-but-unscraped URLs that seed your next `web-search` round). Describe extraction SHAPE in `extract`, facets separated by `|`: `root cause | affected versions | fix | workarounds | timeline`.',\n '',\n '## The loop',\n '',\n '1. Read the brief below (if present). Note `primary_branch`, `keyword_seeds`, `gaps_to_watch`, `stop_criteria`.',\n '2. Fire `first_call_sequence` in order. For `primary_branch: reddit`, lead with `web-search scope:\"reddit\"` \u2192 `scrape-links` on the best post permalinks. For `web`, lead with `web-search scope:\"web\"` \u2192 `scrape-links` on HIGHLY_RELEVANT URLs. For `both`, issue two parallel `web-search` calls (one per scope) in the same turn, then one merged `scrape-links`.',\n classifierLoopStep,\n '4. Scrape every HIGHLY_RELEVANT plus the 2\u20133 best MAYBE_RELEVANT. Group URLs into parallel `scrape-links` calls when contexts differ (e.g. one call for docs, one for reddit threads).',\n '5. Harvest from each scrape extract\\'s `## Follow-up signals` \u2014 new terms, version numbers, vendor names, failure modes, referenced URLs. These seed your next `web-search` round.',\n '6. Fire the next `web-search` round with the harvested terms plus any `refine_queries[]` the classifier suggested. Do NOT paraphrase queries already run \u2014 the classifier tracks them.',\n '7. **Stop** when every `gaps_to_watch` item is closed AND the last `web-search` pass surfaced no new terms, OR when you have completed 4 full passes. 
State remaining gaps explicitly if you hit the cap.',\n '',\n '## Output discipline',\n '',\n '- Cite URL (or Reddit permalink) for every non-trivial claim \u2014 only from a `scrape-links` excerpt you read.',\n '- Quote verbatim: numbers, versions, API names, prices, error messages, stacktraces, people\\'s words.',\n '- Separate documented facts from inferred conclusions explicitly.',\n '- Include the scrape date for time-sensitive claims.',\n '- If you could not verify something, say so \u2014 do not paper over gaps.',\n '- Never cite a URL from a search snippet alone.',\n '',\n '## Post-cutoff discipline',\n '',\n 'For anything released / changed after your training cutoff \u2014 new products, versions, prices, benchmarks \u2014 treat your own query suggestions as hypotheses until a scraped first-party page confirms them. Include `site:<vendor-domain>` queries in your first `web-search` call when the goal names a vendor or product.',\n ].join('\\n');\n}\n\n/**\n * Compact stub emitted when the LLM planner is offline AND the caller did\n * not opt into the full playbook. Names the 3 tools, the loop, parallel-safety,\n * Reddit branch, and cite-from-scrape \u2014 enough to keep an agent moving.\n */\nexport function buildDegradedStub(goal?: string): string {\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not specified \u2014 set one on the next pass.';\n return [\n '# Research session started (LLM planner offline \u2014 compact stub)',\n '',\n SKILL_INSTALL_HINT,\n '',\n focusLine,\n '',\n '**3 tools**: `start-research` (plans), `web-search` (Google fan-out, up to 50 queries/call, `scope: web|reddit|both`), `scrape-links` (fetch URLs in parallel, auto-detects `reddit.com/r/.../comments/` permalinks \u2192 Reddit API; all other URLs \u2192 HTTP scraper). 
All three are **parallel-callable** \u2014 fire multiple in the same turn when subtopics are orthogonal.',\n '',\n '**Loop**: `web-search` \u2192 `scrape-links` \u2192 read `## Follow-up signals` \u2192 harvest new terms \u2192 next `web-search` round \u2192 stop when gaps close OR after 4 passes. Call `web-search` aggressively (2\u20134 rounds, not 1).',\n '',\n '**Reddit branch**: use `web-search scope:\"reddit\"` for sentiment / migration / lived experience. Skip for CVE / API spec / pricing. Reddit permalinks go straight into `scrape-links` for threaded post + comments.',\n '',\n '**Cite**: every non-trivial claim must trace to a `scrape-links` excerpt, never a search snippet. Quote verbatim for numbers, versions, stacktraces, people\\'s words.',\n '',\n 'Pass `include_playbook: true` to `start-research` for the full tactic reference.',\n ].join('\\n');\n}\n\n/**\n * Backward-compat alias \u2014 older call sites import `buildOrientation` directly.\n */\nexport const buildOrientation = buildStaticScaffolding;\n\nasync function buildGoalAwareBrief(\n goal: string,\n signal?: AbortSignal,\n): Promise<string> {\n const processor = createLLMProcessor();\n if (!processor) {\n mcpLog('info', 'start-research: LLM unavailable, returning static orientation only', 'start-research');\n return '';\n }\n\n const brief = await generateResearchBrief(goal, processor, signal);\n if (!brief) {\n mcpLog('warning', 'start-research: brief generation failed, returning static orientation only', 'start-research');\n return '';\n }\n\n return renderResearchBrief(brief);\n}\n\nasync function handleStartResearch(\n params: StartResearchParams,\n signal?: AbortSignal,\n): Promise<ToolExecutionResult<StartResearchOutput>> {\n try {\n const llmHealth = getLLMHealth();\n const plannerKnownOffline = !llmHealth.plannerConfigured\n || (llmHealth.lastPlannerCheckedAt !== null && !llmHealth.lastPlannerOk);\n\n if (plannerKnownOffline && !params.include_playbook) {\n const stub = 
buildDegradedStub(params.goal);\n return toolSuccess(stub, { content: stub });\n }\n\n const scaffolding = buildStaticScaffolding(params.goal, {\n plannerAvailable: !plannerKnownOffline,\n });\n\n let brief = '';\n if (params.goal) {\n brief = await buildGoalAwareBrief(params.goal, signal);\n }\n\n const briefFallbackNote = params.goal && !brief\n ? '\\n\\n---\\n\\n> _Goal-tailored brief unavailable: LLM planner is not configured or failed this call. The static playbook above still applies; you can proceed with it, or retry `start-research` after verifying `LLM_API_KEY`._'\n : '';\n\n const content = brief\n ? `${scaffolding}\\n\\n---\\n\\n${brief}`\n : `${scaffolding}${briefFallbackNote}`;\n\n return toolSuccess(content, { content });\n } catch (err: unknown) {\n const structuredError = classifyError(err);\n mcpLog('error', `start-research: ${structuredError.message}`, 'start-research');\n return toolFailure(\n formatError({\n code: structuredError.code,\n message: structuredError.message,\n retryable: structuredError.retryable,\n toolName: 'start-research',\n howToFix: ['Retry start-research. If the failure persists, verify LLM_API_KEY / LLM_BASE_URL / LLM_MODEL.'],\n }),\n );\n }\n}\n\nexport function registerStartResearchTool(server: MCPServer): void {\n server.tool(\n {\n name: 'start-research',\n title: 'Start Research Session',\n description:\n 'Call this FIRST every research session. Provide a `goal`; I return a goal-tailored brief naming (a) `primary_branch` (reddit for sentiment/migration, web for spec/bug/pricing, both when opinion-heavy AND needs official sources), (b) the exact `first_call_sequence` of web-search + scrape-links calls to fire, (c) 25\u201350 keyword seeds for your first `web-search` call, (d) iteration hints, (e) gaps to watch, (f) stop criteria. No goal? You still get the generic 3-tool playbook. 
Other tools work without calling this, but you will use them worse.',\n schema: startResearchParamsSchema,\n outputSchema: startResearchOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: false,\n },\n },\n async (args) => toToolResponse(await handleStartResearch(args)),\n );\n}\n"],
5
- "mappings": "AAEA;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,qBAAqB;AAC9B,SAAS,cAAc;AACvB,SAAS,aAAa,aAAa,sBAAgD;AACnF,SAAS,mBAAmB;AAE5B,MAAM,qBAAqB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,EAAE,KAAK,IAAI;AASJ,SAAS,uBAAuB,MAAe,OAAuC,CAAC,GAAW;AACvG,QAAM,mBAAmB,KAAK,oBAAoB;AAClD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AAEJ,QAAM,qBAAqB,mBACvB,8IACA;AAEJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOO,SAAS,kBAAkB,MAAuB;AACvD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AACJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAKO,MAAM,mBAAmB;AAEhC,eAAe,oBACb,MACA,QACiB;AACjB,QAAM,YAAY,mBAAmB;AACrC,MAAI,CAAC,WAAW;AACd,WAAO,QAAQ,sEAAsE,gBAAgB;AACrG,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,MAAM,sBAAsB,MAAM,WAAW,MAAM;AACjE,MAAI,CAAC,OAAO;AACV,WAAO,WAAW,8EAA8E,gBAAgB;AAChH,WAAO;AAAA,EACT;AAEA,SAAO,oBAAoB,KAAK;AAClC;AAEA,eAAe,oBACb,QACA,QACmD;AACnD,MAAI;AACF,UAAM,YAAY,aAAa;AAC/B,UAAM,sBAAsB,CAAC,UAAU,qBACjC,UAAU,yBAAyB,QAAQ,CAAC,UAAU;AAE5D,QAAI,uBAAuB,CAAC,OAAO,kBAAkB;AACnD,YAAM,OAAO,kBAAkB,OAAO,IAAI;AAC1C,aAAO,YAAY,MAAM,EAAE,SAAS,KAAK,CAAC;AAAA,IAC5C;AAEA,UAAM,cAAc,uBAAuB,OAAO,MAAM;AAAA,MACtD,kBAAkB,CAAC;AAAA,IACrB,CAAC;AAED,QAAI,QAAQ;AACZ,QAAI,OAAO,MAAM;AACf,cAAQ,MAAM,oBAAoB,OAAO,MAAM,MAAM;AAAA,IACvD;AAEA,UAAM,oBAAoB,OAAO,QAAQ,CAAC,QACtC,kOACA;AAEJ,UAAM,UAAU,QACZ,GAAG,WAAW;AAAA;AAAA;AAAA;AAAA,EAAc,KAAK,KACjC,GAAG,WAAW,GAAG,iBAAiB;AAEtC,WAAO,YAAY,SAAS,EAAE,QAAQ,C
AAC;AAAA,EACzC,SAAS,KAAc;AACrB,UAAM,kBAAkB,cAAc,GAAG;AACzC,WAAO,SAAS,mBAAmB,gBAAgB,OAAO,IAAI,gBAAgB;AAC9E,WAAO;AAAA,MACL,YAAY;AAAA,QACV,MAAM,gBAAgB;AAAA,QACtB,SAAS,gBAAgB;AAAA,QACzB,WAAW,gBAAgB;AAAA,QAC3B,UAAU;AAAA,QACV,UAAU,CAAC,+FAA+F;AAAA,MAC5G,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAEO,SAAS,0BAA0B,QAAyB;AACjE,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,IACF;AAAA,IACA,OAAO,SAAS,eAAe,MAAM,oBAAoB,IAAI,CAAC;AAAA,EAChE;AACF;",
4
+ "sourcesContent": ["import type { MCPServer } from 'mcp-use/server';\n\nimport {\n startResearchOutputSchema,\n startResearchParamsSchema,\n type StartResearchOutput,\n type StartResearchParams,\n} from '../schemas/start-research.js';\nimport {\n createLLMProcessor,\n generateResearchBrief,\n getLLMHealth,\n renderResearchBrief,\n type LLMHealthSnapshot,\n} from '../services/llm-processor.js';\nimport { classifyError } from '../utils/errors.js';\nimport { mcpLog } from '../utils/logger.js';\nimport { toolFailure, toolSuccess, toToolResponse, type ToolExecutionResult } from './mcp-helpers.js';\nimport { formatError } from './utils.js';\n\nconst SKILL_INSTALL_HINT = [\n '> \uD83D\uDCA1 **Pair this server with the `run-research` skill** for the full agentic playbook',\n '> (single-agent loop, multi-agent orchestrator, mission-prompt templates, output discipline).',\n '> Install once per machine \u2014 the skill is what teaches the agent how to spend these tools well:',\n '>',\n '> ```bash',\n '> npx -y skills add -y -g yigitkonur/skills-by-yigitkonur/skills/run-research',\n '> ```',\n '>',\n '> Already installed? Skip this \u2014 the skill auto-loads on relevant prompts. The full pack',\n '> ships ~50 sibling skills: `npx -y skills add -y -g yigitkonur/skills-by-yigitkonur`.',\n].join('\\n');\n\n/**\n * Full research-loop playbook. Teaches the 3-tool mental model\n * (start-research, web-search, scrape-links), the aggressive multi-call\n * discipline, parallel-callability, and the cite-from-scrape rule.\n *\n * Emitted when the LLM planner is healthy OR `include_playbook: true`.\n */\nexport function buildStaticScaffolding(goal?: string, opts: { plannerAvailable?: boolean } = {}): string {\n const plannerAvailable = opts.plannerAvailable ?? true;\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not yet specified \u2014 set one on the next pass';\n\n const classifierLoopStep = plannerAvailable\n ? '3. 
Read the classifier output: `synthesis` (citations in `[rank]`), `gaps[]` (with ids), `refine_queries[]` (follow-ups tied to gap ids).'\n : '3. Classifier output is NOT available (LLM planner offline). `web-search` returns a raw ranked list \u2014 synthesize the terrain yourself from titles + snippets.';\n\n return [\n '# Research session started',\n '',\n SKILL_INSTALL_HINT,\n '',\n focusLine,\n '',\n 'You are running a research LOOP, not answering from memory. Training data is stale; the web is authoritative for anything dated, versioned, priced, or contested. Every non-trivial claim in your final answer must be traceable to a `scrape-links` excerpt you read. Never cite a URL from a `web-search` snippet alone.',\n '',\n '## The 3 tools',\n '',\n '**1. `start-research`** \u2014 you just called me. I plan this session and return the brief below. Call me again only if the goal materially shifts.',\n '',\n '**2. `web-search`** \u2014 fan out Google queries in parallel. One call carries **up to 50 queries** in a flat `queries` array. Call me **aggressively** \u2014 2\u20134 rounds per session is normal, not 1. After each pass, read `gaps[]` and `refine_queries[]` and fire another round with the harvested terms. **Parallel-safe**: run multiple `web-search` calls in the same turn for orthogonal subtopics (e.g. one call for \"spec\" queries, one call for \"sentiment\" queries). `scope` values:',\n '- `\"reddit\"` \u2192 server appends `site:reddit.com` and filters to post permalinks. Use for sentiment / migration / lived experience.',\n '- `\"web\"` (default) \u2192 open web. Use for spec / bug / pricing / CVE / API / primary-source hunts.',\n '- `\"both\"` \u2192 fans each query across both. Use when the topic is opinion-heavy AND needs official sources.',\n '',\n '**3. `scrape-links`** \u2014 fetch URLs in parallel and run per-URL LLM extraction. 
**Auto-detects** `reddit.com/r/.../comments/` permalinks and routes them through the Reddit API (threaded post + comments); everything else flows through the HTTP scraper. Mix Reddit + web URLs freely \u2014 both branches run concurrently. **Parallel-safe**: prefer multiple `scrape-links` calls with contextually grouped URLs over one giant mixed batch. Each page returns `## Source`, `## Matches` (verbatim facts), `## Not found` (explicit gaps this page did NOT answer), `## Follow-up signals` (new terms + referenced-but-unscraped URLs that seed your next `web-search` round). Describe extraction SHAPE in `extract`, facets separated by `|`: `root cause | affected versions | fix | workarounds | timeline`.',\n '',\n '## The loop',\n '',\n '1. Read the brief below (if present). Note `primary_branch`, `keyword_seeds`, `gaps_to_watch`, `stop_criteria`.',\n '2. Fire `first_call_sequence` in order. For `primary_branch: reddit`, lead with `web-search scope:\"reddit\"` \u2192 `scrape-links` on the best post permalinks. For `web`, lead with `web-search scope:\"web\"` \u2192 `scrape-links` on HIGHLY_RELEVANT URLs. For `both`, issue two parallel `web-search` calls (one per scope) in the same turn, then one merged `scrape-links`.',\n classifierLoopStep,\n '4. Scrape every HIGHLY_RELEVANT plus the 2\u20133 best MAYBE_RELEVANT. Group URLs into parallel `scrape-links` calls when contexts differ (e.g. one call for docs, one for reddit threads).',\n '5. Harvest from each scrape extract\\'s `## Follow-up signals` \u2014 new terms, version numbers, vendor names, failure modes, referenced URLs. These seed your next `web-search` round.',\n '6. Fire the next `web-search` round with the harvested terms plus any `refine_queries[]` the classifier suggested. Do NOT paraphrase queries already run \u2014 the classifier tracks them.',\n '7. **Stop** when every `gaps_to_watch` item is closed AND the last `web-search` pass surfaced no new terms, OR when you have completed 4 full passes. 
State remaining gaps explicitly if you hit the cap.',\n '',\n '## Output discipline',\n '',\n '- Cite URL (or Reddit permalink) for every non-trivial claim \u2014 only from a `scrape-links` excerpt you read.',\n '- Quote verbatim: numbers, versions, API names, prices, error messages, stacktraces, people\\'s words.',\n '- Separate documented facts from inferred conclusions explicitly.',\n '- Include the scrape date for time-sensitive claims.',\n '- If you could not verify something, say so \u2014 do not paper over gaps.',\n '- Never cite a URL from a search snippet alone.',\n '',\n '## Post-cutoff discipline',\n '',\n 'For anything released / changed after your training cutoff \u2014 new products, versions, prices, benchmarks \u2014 treat your own query suggestions as hypotheses until a scraped first-party page confirms them. Include `site:<vendor-domain>` queries in your first `web-search` call when the goal names a vendor or product.',\n ].join('\\n');\n}\n\n/**\n * Compact stub emitted when the LLM planner is offline AND the caller did\n * not opt into the full playbook. Names the 3 tools, the loop, parallel-safety,\n * Reddit branch, and cite-from-scrape \u2014 enough to keep an agent moving.\n */\nexport function buildDegradedStub(goal?: string): string {\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not specified \u2014 set one on the next pass.';\n return [\n '# Research session started (LLM planner offline \u2014 compact stub)',\n '',\n SKILL_INSTALL_HINT,\n '',\n focusLine,\n '',\n '**3 tools**: `start-research` (plans), `web-search` (Google fan-out, up to 50 queries/call, `scope: web|reddit|both`), `scrape-links` (fetch URLs in parallel, auto-detects `reddit.com/r/.../comments/` permalinks \u2192 Reddit API; all other URLs \u2192 HTTP scraper). 
All three are **parallel-callable** \u2014 fire multiple in the same turn when subtopics are orthogonal.',\n '',\n '**Loop**: `web-search` \u2192 `scrape-links` \u2192 read `## Follow-up signals` \u2192 harvest new terms \u2192 next `web-search` round \u2192 stop when gaps close OR after 4 passes. Call `web-search` aggressively (2\u20134 rounds, not 1).',\n '',\n '**Reddit branch**: use `web-search scope:\"reddit\"` for sentiment / migration / lived experience. Skip for CVE / API spec / pricing. Reddit permalinks go straight into `scrape-links` for threaded post + comments.',\n '',\n '**Cite**: every non-trivial claim must trace to a `scrape-links` excerpt, never a search snippet. Quote verbatim for numbers, versions, stacktraces, people\\'s words.',\n '',\n 'Pass `include_playbook: true` to `start-research` for the full tactic reference.',\n ].join('\\n');\n}\n\n/**\n * Backward-compat alias \u2014 older call sites import `buildOrientation` directly.\n */\nexport const buildOrientation = buildStaticScaffolding;\n\n// ============================================================================\n// Planner-offline gate.\n//\n// The problem we are guarding against: a single transient LLM failure (one bad\n// 429, one malformed JSON response from the classifier) used to poison the\n// gate forever and force every subsequent `start-research` call into the\n// compact stub \u2014 even when env was fine and the next call would have\n// succeeded. That created a deadlock where the very tool that could reset\n// the health flag was the tool being blocked.\n//\n// The safer semantics implemented here:\n// 1. If env is not configured, we are offline. Hard stop.\n// 2. Otherwise, require **two consecutive failures** before gating (one\n// blip is tolerated).\n// 3. Even then, the gate only holds for PLANNER_FAILURE_TTL_MS after the\n// most recent failure. 
After that window we give the planner another\n// chance regardless of the counter \u2014 if it is still broken the next\n// call's failure will re-arm the gate.\n// 4. Any success resets the counter to 0, so the gate opens immediately\n// on recovery.\n// ============================================================================\n\n/** Minimum consecutive failures before the gate closes. */\nexport const PLANNER_FAILURE_THRESHOLD = 2;\n\n/** How long a recent failure burst keeps the gate closed, in ms. */\nexport const PLANNER_FAILURE_TTL_MS = 60_000;\n\ntype PlannerGateHealth = Pick<\n LLMHealthSnapshot,\n 'plannerConfigured' | 'consecutivePlannerFailures' | 'lastPlannerCheckedAt'\n>;\n\n/**\n * Pure predicate \u2014 returns true when the planner should be treated as\n * offline for the purposes of `start-research`. Kept exported and\n * dependency-free so tests can drive it without touching the LLM.\n */\nexport function isPlannerKnownOffline(\n health: PlannerGateHealth,\n nowMs: number = Date.now(),\n): boolean {\n if (!health.plannerConfigured) {\n return true;\n }\n if (health.consecutivePlannerFailures < PLANNER_FAILURE_THRESHOLD) {\n return false;\n }\n if (health.lastPlannerCheckedAt === null) {\n return false;\n }\n const lastMs = Date.parse(health.lastPlannerCheckedAt);\n if (Number.isNaN(lastMs)) {\n return false;\n }\n return nowMs - lastMs < PLANNER_FAILURE_TTL_MS;\n}\n\nasync function buildGoalAwareBrief(\n goal: string,\n signal?: AbortSignal,\n): Promise<string> {\n const processor = createLLMProcessor();\n if (!processor) {\n mcpLog('info', 'start-research: LLM unavailable, returning static orientation only', 'start-research');\n return '';\n }\n\n const brief = await generateResearchBrief(goal, processor, signal);\n if (!brief) {\n mcpLog('warning', 'start-research: brief generation failed, returning static orientation only', 'start-research');\n return '';\n }\n\n return renderResearchBrief(brief);\n}\n\nasync function handleStartResearch(\n 
params: StartResearchParams,\n signal?: AbortSignal,\n): Promise<ToolExecutionResult<StartResearchOutput>> {\n try {\n const llmHealth = getLLMHealth();\n const plannerKnownOffline = isPlannerKnownOffline(llmHealth);\n\n if (plannerKnownOffline && !params.include_playbook) {\n const stub = buildDegradedStub(params.goal);\n return toolSuccess(stub, { content: stub });\n }\n\n const scaffolding = buildStaticScaffolding(params.goal, {\n plannerAvailable: !plannerKnownOffline,\n });\n\n let brief = '';\n if (params.goal) {\n brief = await buildGoalAwareBrief(params.goal, signal);\n }\n\n const briefFallbackNote = params.goal && !brief\n ? '\\n\\n---\\n\\n> _Goal-tailored brief unavailable: LLM planner is not configured or failed this call. The static playbook above still applies; you can proceed with it, or retry `start-research` after verifying `LLM_API_KEY`._'\n : '';\n\n const content = brief\n ? `${scaffolding}\\n\\n---\\n\\n${brief}`\n : `${scaffolding}${briefFallbackNote}`;\n\n return toolSuccess(content, { content });\n } catch (err: unknown) {\n const structuredError = classifyError(err);\n mcpLog('error', `start-research: ${structuredError.message}`, 'start-research');\n return toolFailure(\n formatError({\n code: structuredError.code,\n message: structuredError.message,\n retryable: structuredError.retryable,\n toolName: 'start-research',\n howToFix: ['Retry start-research. If the failure persists, verify LLM_API_KEY / LLM_BASE_URL / LLM_MODEL.'],\n }),\n );\n }\n}\n\nexport function registerStartResearchTool(server: MCPServer): void {\n server.tool(\n {\n name: 'start-research',\n title: 'Start Research Session',\n description:\n 'Call this FIRST every research session. 
Provide a `goal`; I return a goal-tailored brief naming (a) `primary_branch` (reddit for sentiment/migration, web for spec/bug/pricing, both when opinion-heavy AND needs official sources), (b) the exact `first_call_sequence` of web-search + scrape-links calls to fire, (c) 25\u201350 keyword seeds for your first `web-search` call, (d) iteration hints, (e) gaps to watch, (f) stop criteria. No goal? You still get the generic 3-tool playbook. Other tools work without calling this, but you will use them worse.',\n schema: startResearchParamsSchema,\n outputSchema: startResearchOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: false,\n },\n },\n async (args) => toToolResponse(await handleStartResearch(args)),\n );\n}\n"],
5
+ "mappings": "AAEA;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAEK;AACP,SAAS,qBAAqB;AAC9B,SAAS,cAAc;AACvB,SAAS,aAAa,aAAa,sBAAgD;AACnF,SAAS,mBAAmB;AAE5B,MAAM,qBAAqB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,EAAE,KAAK,IAAI;AASJ,SAAS,uBAAuB,MAAe,OAAuC,CAAC,GAAW;AACvG,QAAM,mBAAmB,KAAK,oBAAoB;AAClD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AAEJ,QAAM,qBAAqB,mBACvB,8IACA;AAEJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOO,SAAS,kBAAkB,MAAuB;AACvD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AACJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAKO,MAAM,mBAAmB;AAyBzB,MAAM,4BAA4B;AAGlC,MAAM,yBAAyB;AAY/B,SAAS,sBACd,QACA,QAAgB,KAAK,IAAI,GAChB;AACT,MAAI,CAAC,OAAO,mBAAmB;AAC7B,WAAO;AAAA,EACT;AACA,MAAI,OAAO,6BAA6B,2BAA2B;AACjE,WAAO;AAAA,EACT;AACA,MAAI,OAAO,yBAAyB,MAAM;AACxC,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,MAAM,OAAO,oBAAoB;AACrD,MAAI,OAAO,MAAM,MAAM,GAAG;AACxB,WAAO;AAAA,EACT;AACA,SAAO,QAAQ,SAAS;AAC1B;AAEA,eAAe,oBACb,MACA,QACiB;AACjB,QAAM,YAAY,mBAAmB;AACrC,MAAI,CAAC,WAAW;AACd,WAAO,QAAQ,sEAAsE,gBAAgB;AACrG,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,MAAM,sBAAsB,MAAM,WAAW,MAAM;AACjE,MAAI,CAAC,OAAO;AACV,WAAO,WAAW,8EAA8E,gBAAgB;AAChH,WAAO;AAAA,EACT;AAEA,SAAO,oBAAoB,KAAK;AAClC;AAEA,eAAe,oBACb,QACA,QACmD;AACnD,MAAI;AACF,UAAM,YAAY,aAAa;AAC/B,UAAM,sBAAsB,sBAAsB,SAAS;AAE3D,QAAI,uBAAuB,CAAC,OAAO,kBAAkB;AACnD,YAAM,OAAO,kBAAkB,OAAO,IAAI;AAC1C,aAAO,YAAY,MAAM,EAAE,SAAS,KAAK,CAAC;AAAA,IAC5C;AAEA,UAAM
,cAAc,uBAAuB,OAAO,MAAM;AAAA,MACtD,kBAAkB,CAAC;AAAA,IACrB,CAAC;AAED,QAAI,QAAQ;AACZ,QAAI,OAAO,MAAM;AACf,cAAQ,MAAM,oBAAoB,OAAO,MAAM,MAAM;AAAA,IACvD;AAEA,UAAM,oBAAoB,OAAO,QAAQ,CAAC,QACtC,kOACA;AAEJ,UAAM,UAAU,QACZ,GAAG,WAAW;AAAA;AAAA;AAAA;AAAA,EAAc,KAAK,KACjC,GAAG,WAAW,GAAG,iBAAiB;AAEtC,WAAO,YAAY,SAAS,EAAE,QAAQ,CAAC;AAAA,EACzC,SAAS,KAAc;AACrB,UAAM,kBAAkB,cAAc,GAAG;AACzC,WAAO,SAAS,mBAAmB,gBAAgB,OAAO,IAAI,gBAAgB;AAC9E,WAAO;AAAA,MACL,YAAY;AAAA,QACV,MAAM,gBAAgB;AAAA,QACtB,SAAS,gBAAgB;AAAA,QACzB,WAAW,gBAAgB;AAAA,QAC3B,UAAU;AAAA,QACV,UAAU,CAAC,+FAA+F;AAAA,MAC5G,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAEO,SAAS,0BAA0B,QAAyB;AACjE,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,IACF;AAAA,IACA,OAAO,SAAS,eAAe,MAAM,oBAAoB,IAAI,CAAC;AAAA,EAChE;AACF;",
6
6
  "names": []
7
7
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mcp-researchpowerpack",
3
- "version": "6.0.0",
3
+ "version": "6.0.1",
4
4
  "description": "HTTP-first MCP research server: start-research (goal-tailored brief), web-search (with Reddit scope), scrape-links (auto-detects Reddit URLs) — built on mcp-use.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",