mcp-researchpowerpack 6.0.6 → 6.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/mcp-use.json CHANGED
@@ -1,7 +1,7 @@
  {
  "includeInspector": false,
- "buildTime": "2026-04-24T03:09:43.660Z",
- "buildId": "b5cf2231e485cf89",
+ "buildTime": "2026-04-24T03:32:40.444Z",
+ "buildId": "292f500ec0b6b07f",
  "entryPoint": "dist/index.js",
  "widgets": {}
  }
@@ -103,7 +103,7 @@ async function fetchWebBranch(inputs, client) {
  successItems: [],
  failedContents: [],
  metrics: { successful: 0, failed: 0, totalCredits: 0 },
- binaryDeferred: []
+ jinaFallbacks: []
  };
  }
  mcpLog("info", `[concurrency] web branch: fanning out ${inputs.length} URL(s) with limit=${CONCURRENCY.SCRAPER}`, "scrape");
@@ -112,7 +112,7 @@ async function fetchWebBranch(inputs, client) {
  const urlToIndex = new Map(inputs.map((i) => [i.url, i.origIndex]));
  const successItems = [];
  const failedContents = [];
- const binaryDeferred = [];
+ const jinaFallbacks = [];
  let successful = 0;
  let failed = 0;
  let totalCredits = 0;
@@ -127,18 +127,28 @@ async function fetchWebBranch(inputs, client) {
  continue;
  }
  if (result.error?.code === ErrorCode.UNSUPPORTED_BINARY_CONTENT) {
- binaryDeferred.push({
+ jinaFallbacks.push({
  url: result.url,
- origIndex: urlToIndex.get(result.url) ?? origIndex
+ origIndex: urlToIndex.get(result.url) ?? origIndex,
+ reason: "binary_content"
  });
  continue;
  }
- if (result.error || result.statusCode < 200 || result.statusCode >= 300) {
+ const scrapeFailed = Boolean(result.error) || result.statusCode < 200 || result.statusCode >= 300;
+ if (scrapeFailed && result.statusCode !== 404) {
+ jinaFallbacks.push({
+ url: result.url,
+ origIndex: urlToIndex.get(result.url) ?? origIndex,
+ reason: "scrape_failed",
+ scrapeError: result.error?.message || result.content || `HTTP ${result.statusCode}`
+ });
+ continue;
+ }
+ if (scrapeFailed) {
  failed++;
- const errorMsg = result.error?.message || result.content || `HTTP ${result.statusCode}`;
  failedContents.push(`## ${result.url}

- \u274C Failed to scrape: ${errorMsg}`);
+ \u274C Failed to scrape: HTTP 404 \u2014 Page not found`);
  continue;
  }
  successful++;
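For reference, a simplified TypeScript sketch of the routing logic this hunk introduces in fetchWebBranch. The field names and message strings follow the diff above, but the result type and the string error code are stand-ins for the real imports, and the function is illustrative rather than a copy of the shipped code: binary responses and non-404 scrape failures are deferred to Jina Reader (the latter carrying the original Scrape.do error), while a genuine 404 is recorded as a hard failure.

// Sketch only: stand-in types, logic mirrors the hunk above.
interface JinaFallback {
  url: string;
  origIndex: number;
  reason: 'binary_content' | 'scrape_failed';
  scrapeError?: string;
}

type ScrapeResult = {
  url: string;
  statusCode: number;
  content: string;
  error?: { code?: string; message?: string };
};

function routeWebResult(
  result: ScrapeResult,
  origIndex: number,
): { kind: 'fallback'; fallback: JinaFallback } | { kind: 'failed'; message: string } | { kind: 'ok' } {
  // Binary content-type: Scrape.do cannot decode it, so hand the URL to Jina Reader.
  if (result.error?.code === 'UNSUPPORTED_BINARY_CONTENT') {
    return { kind: 'fallback', fallback: { url: result.url, origIndex, reason: 'binary_content' } };
  }
  const scrapeFailed = Boolean(result.error) || result.statusCode < 200 || result.statusCode >= 300;
  // Any non-404 failure also gets a second chance through Jina, keeping the
  // original error so a double failure can report both layers later.
  if (scrapeFailed && result.statusCode !== 404) {
    return {
      kind: 'fallback',
      fallback: {
        url: result.url,
        origIndex,
        reason: 'scrape_failed',
        scrapeError: result.error?.message || result.content || `HTTP ${result.statusCode}`,
      },
    };
  }
  // A real 404 means the page does not exist; rerouting would not help.
  if (scrapeFailed) {
    return { kind: 'failed', message: `## ${result.url}\n\n\u274C Failed to scrape: HTTP 404 \u2014 Page not found` };
  }
  return { kind: 'ok' };
}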
@@ -157,10 +167,20 @@ async function fetchWebBranch(inputs, client) {
  successItems,
  failedContents,
  metrics: { successful, failed, totalCredits },
- binaryDeferred
+ jinaFallbacks
  };
  }
- async function fetchDocumentBranch(inputs, jinaClient) {
+ function formatJinaFailure(url, jinaError, scrapeError) {
+ if (scrapeError) {
+ return `## ${url}
+
+ \u274C Both scrapers failed. Scrape.do: ${scrapeError}. Jina Reader: ${jinaError}.`;
+ }
+ return `## ${url}
+
+ \u274C Document conversion failed: ${jinaError}`;
+ }
+ async function fetchDocumentBranch(inputs, jinaClient, scrapeErrorContext) {
  if (inputs.length === 0) {
  return { successItems: [], failedContents: [], metrics: { successful: 0, failed: 0, totalCredits: 0 } };
  }
@@ -181,28 +201,23 @@ async function fetchDocumentBranch(inputs, jinaClient) {
  for (let i = 0; i < results.length; i++) {
  const settled = results[i];
  const input = inputs[i];
+ const scrapeError = scrapeErrorContext?.get(input.url);
  if (!settled) {
  failed++;
- failedContents.push(`## ${input.url}
-
- \u274C No result returned (document conversion)`);
+ failedContents.push(formatJinaFailure(input.url, "No result returned", scrapeError));
  continue;
  }
  if (settled.status === "rejected") {
  failed++;
  const reason = settled.reason instanceof Error ? settled.reason.message : String(settled.reason);
- failedContents.push(`## ${input.url}
-
- \u274C Document conversion failed: ${reason}`);
+ failedContents.push(formatJinaFailure(input.url, reason, scrapeError));
  continue;
  }
  const result = settled.value;
  if (result.error || result.statusCode < 200 || result.statusCode >= 300) {
  failed++;
  const errorMsg = result.error?.message || `HTTP ${result.statusCode}`;
- failedContents.push(`## ${input.url}
-
- \u274C Document conversion failed: ${errorMsg}`);
+ failedContents.push(formatJinaFailure(input.url, errorMsg, scrapeError));
  continue;
  }
  successful++;
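The helper wired in above replaces three inline template literals. A minimal sketch of formatJinaFailure (body copied from the diff) together with the two message shapes it produces; the example URLs and error strings are invented:

// Copied from the diff: one formatter for every document-branch failure.
function formatJinaFailure(url: string, jinaError: string, scrapeError?: string): string {
  if (scrapeError) {
    return `## ${url}\n\n\u274C Both scrapers failed. Scrape.do: ${scrapeError}. Jina Reader: ${jinaError}.`;
  }
  return `## ${url}\n\n\u274C Document conversion failed: ${jinaError}`;
}

// URL rerouted after a Scrape.do failure: both layers are reported.
formatJinaFailure('https://example.com/report', 'HTTP 503', 'HTTP 429');
// => "## https://example.com/report\n\n\u274C Both scrapers failed. Scrape.do: HTTP 429. Jina Reader: HTTP 503."

// Plain document URL that only ever went through Jina: single-layer message.
formatJinaFailure('https://example.com/file.pdf', 'timeout');
// => "## https://example.com/file.pdf\n\n\u274C Document conversion failed: timeout"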
@@ -446,7 +461,7 @@ async function handleScrapeLinks(params, reporter = NOOP_REPORTER) {
  successItems: [],
  failedContents: [],
  metrics: { successful: 0, failed: 0, totalCredits: 0 },
- binaryDeferred: []
+ jinaFallbacks: []
  };
  const [webPhase, redditPhase, documentPhase] = await Promise.all([
  webInputs.length > 0 ? fetchWebBranch(webInputs, clients.client) : Promise.resolve(emptyPhase),
@@ -458,12 +473,21 @@ async function handleScrapeLinks(params, reporter = NOOP_REPORTER) {
  failedContents: [],
  metrics: { successful: 0, failed: 0, totalCredits: 0 }
  };
- if (webPhase.binaryDeferred.length > 0) {
+ if (webPhase.jinaFallbacks.length > 0) {
+ const binaryCount = webPhase.jinaFallbacks.filter((f) => f.reason === "binary_content").length;
+ const failedCount = webPhase.jinaFallbacks.length - binaryCount;
  await reporter.log(
  "info",
- `Rerouting ${webPhase.binaryDeferred.length} binary URL(s) from Scrape.do \u2192 Jina Reader`
+ `Rerouting ${webPhase.jinaFallbacks.length} URL(s) to Jina Reader: ${binaryCount} binary, ${failedCount} scrape-failed`
+ );
+ const fallbackInputs = webPhase.jinaFallbacks.map((f) => ({
+ url: f.url,
+ origIndex: f.origIndex
+ }));
+ const errorContext = new Map(
+ webPhase.jinaFallbacks.filter((f) => f.scrapeError !== void 0).map((f) => [f.url, f.scrapeError])
  );
- deferredPhase = await fetchDocumentBranch(webPhase.binaryDeferred, clients.jinaClient);
+ deferredPhase = await fetchDocumentBranch(fallbackInputs, clients.jinaClient, errorContext);
  }
  const successItems = [
  ...webPhase.successItems,
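To make the Phase 2 wiring above concrete, a small sketch with an invented three-entry fallback list: the handler splits the counts for the log line, strips each fallback down to a plain url/origIndex input, and keeps the Scrape.do errors in a URL-keyed Map so fetchDocumentBranch can hand them to formatJinaFailure.

// Hypothetical output of fetchWebBranch; field shapes follow the diff.
const jinaFallbacks = [
  { url: 'https://a.example/doc.pdf', origIndex: 0, reason: 'binary_content' as const },
  { url: 'https://b.example/page', origIndex: 1, reason: 'scrape_failed' as const, scrapeError: 'HTTP 503' },
  { url: 'https://c.example/page', origIndex: 2, reason: 'scrape_failed' as const, scrapeError: 'timeout' },
];

const binaryCount = jinaFallbacks.filter((f) => f.reason === 'binary_content').length; // 1
const failedCount = jinaFallbacks.length - binaryCount;                                // 2
// Log line: "Rerouting 3 URL(s) to Jina Reader: 1 binary, 2 scrape-failed"

// Only url/origIndex travel to the document branch...
const fallbackInputs = jinaFallbacks.map((f) => ({ url: f.url, origIndex: f.origIndex }));
// ...while the original Scrape.do errors ride along in a URL-keyed Map.
const errorContext = new Map(
  jinaFallbacks
    .filter((f) => f.scrapeError !== undefined)
    .map((f) => [f.url, f.scrapeError!] as [string, string]),
);
// errorContext.get('https://b.example/page') === 'HTTP 503'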
@@ -546,6 +570,7 @@ function registerScrapeLinksTool(server) {
  );
  }
  export {
+ formatJinaFailure,
  handleScrapeLinks,
  registerScrapeLinksTool
  };
@@ -1,7 +1,7 @@
  {
  "version": 3,
  "sources": ["../../../src/tools/scrape.ts"],
- "sourcesContent": ["/**\n * Scrape Links Tool Handler\n *\n * Scrapes many URLs in parallel. Reddit permalinks (reddit.com/r/.../comments/...)\n * are auto-detected and routed through the Reddit API; all other URLs go through\n * the scraper. Both branches feed the same per-URL LLM extraction pipeline.\n *\n * NEVER throws \u2014 every error is returned as a tool-level failure response.\n */\n\nimport type { MCPServer } from 'mcp-use/server';\n\nimport {\n SCRAPER,\n CONCURRENCY,\n getCapabilities,\n getMissingEnvMessage,\n parseEnv,\n} from '../config/index.js';\nimport {\n scrapeLinksOutputSchema,\n scrapeLinksParamsSchema,\n type ScrapeLinksParams,\n type ScrapeLinksOutput,\n} from '../schemas/scrape-links.js';\nimport { ScraperClient } from '../clients/scraper.js';\nimport { RedditClient, type PostResult } from '../clients/reddit.js';\nimport { JinaClient } from '../clients/jina.js';\nimport { MarkdownCleaner } from '../services/markdown-cleaner.js';\nimport { createLLMProcessor, processContentWithLLM } from '../services/llm-processor.js';\nimport { removeMetaTags } from '../utils/markdown-formatter.js';\nimport { extractReadableContent } from '../utils/content-extractor.js';\nimport { classifyError, ErrorCode } from '../utils/errors.js';\nimport { isDocumentUrl } from '../utils/source-type.js';\nimport { pMap, pMapSettled } from '../utils/concurrency.js';\nimport {\n mcpLog,\n formatSuccess,\n formatError,\n formatBatchHeader,\n formatDuration,\n} from './utils.js';\nimport {\n createToolReporter,\n NOOP_REPORTER,\n toolFailure,\n toolSuccess,\n toToolResponse,\n type ToolExecutionResult,\n type ToolReporter,\n} from './mcp-helpers.js';\n\nconst markdownCleaner = new MarkdownCleaner();\n\nfunction enhanceExtractionInstruction(instruction: string | undefined): string {\n const base = instruction || 'Extract the main content and key information from this page.';\n return `${SCRAPER.EXTRACTION_PREFIX}\\n\\n${base}\\n\\n${SCRAPER.EXTRACTION_SUFFIX}`;\n}\n\n// --- Types ---\n\ninterface ProcessedResult {\n url: string;\n content: string;\n index: number; // original position in params.urls[]\n}\n\ninterface ScrapeMetrics {\n successful: number;\n failed: number;\n totalCredits: number;\n}\n\ninterface ScrapePhaseResult {\n successItems: ProcessedResult[];\n failedContents: string[];\n metrics: ScrapeMetrics;\n}\n\ninterface BranchInput {\n url: string;\n origIndex: number;\n}\n\ninterface ScrapeClients {\n client: ScraperClient;\n jinaClient: JinaClient;\n llmProcessor: ReturnType<typeof createLLMProcessor>;\n}\n\ninterface WebPhaseResult extends ScrapePhaseResult {\n /** URLs that Scrape.do returned as binary content; re-run through Jina. 
*/\n binaryDeferred: BranchInput[];\n}\n\n// --- Reddit URL detection ---\n\nconst REDDIT_HOST = /(?:^|\\.)reddit\\.com$/i;\nconst REDDIT_POST_PERMALINK = /\\/r\\/[^/]+\\/comments\\/[a-z0-9]+/i;\n\nfunction isRedditUrl(url: string): boolean {\n try {\n const u = new URL(url);\n return REDDIT_HOST.test(u.hostname);\n } catch {\n return false;\n }\n}\n\nfunction isRedditPostPermalink(url: string): boolean {\n try {\n const u = new URL(url);\n return REDDIT_HOST.test(u.hostname) && REDDIT_POST_PERMALINK.test(u.pathname);\n } catch {\n return false;\n }\n}\n\n// --- Error helper ---\n\nfunction createScrapeErrorResponse(\n code: string,\n message: string,\n startTime: number,\n retryable = false,\n alternatives?: string[],\n): ToolExecutionResult<ScrapeLinksOutput> {\n return toolFailure(\n `${formatError({\n code,\n message,\n retryable,\n toolName: 'scrape-links',\n howToFix: code === 'NO_URLS' ? ['Provide at least one valid URL'] : undefined,\n alternatives,\n })}\\n\\nExecution time: ${formatDuration(Date.now() - startTime)}`,\n );\n}\n\n// --- URL partitioning ---\n\ninterface PartitionedUrls {\n webInputs: BranchInput[];\n redditInputs: BranchInput[];\n documentInputs: BranchInput[];\n invalidEntries: { url: string; origIndex: number }[];\n}\n\nfunction partitionUrls(urls: string[]): PartitionedUrls {\n const webInputs: BranchInput[] = [];\n const redditInputs: BranchInput[] = [];\n const documentInputs: BranchInput[] = [];\n const invalidEntries: { url: string; origIndex: number }[] = [];\n\n for (let i = 0; i < urls.length; i++) {\n const url = urls[i]!;\n try {\n new URL(url);\n } catch {\n invalidEntries.push({ url, origIndex: i });\n continue;\n }\n // Document URLs (.pdf/.docx/.pptx/.xlsx) go straight to Jina Reader \u2014\n // bypassing Scrape.do because it cannot decode binary bodies. 
Ordered\n // before the Reddit check so a hypothetical PDF on a reddit-adjacent host\n // still takes the document path.\n if (isDocumentUrl(url)) {\n documentInputs.push({ url, origIndex: i });\n } else if (isRedditUrl(url)) {\n redditInputs.push({ url, origIndex: i });\n } else {\n webInputs.push({ url, origIndex: i });\n }\n }\n\n return { webInputs, redditInputs, documentInputs, invalidEntries };\n}\n\n// --- Web branch ---\n\nasync function fetchWebBranch(\n inputs: BranchInput[],\n client: ScraperClient,\n): Promise<WebPhaseResult> {\n if (inputs.length === 0) {\n return {\n successItems: [],\n failedContents: [],\n metrics: { successful: 0, failed: 0, totalCredits: 0 },\n binaryDeferred: [],\n };\n }\n\n mcpLog('info', `[concurrency] web branch: fanning out ${inputs.length} URL(s) with limit=${CONCURRENCY.SCRAPER}`, 'scrape');\n const urls = inputs.map((i) => i.url);\n const results = await client.scrapeMultiple(urls, { timeout: 60 });\n const urlToIndex = new Map(inputs.map((i) => [i.url, i.origIndex]));\n\n const successItems: ProcessedResult[] = [];\n const failedContents: string[] = [];\n const binaryDeferred: BranchInput[] = [];\n let successful = 0;\n let failed = 0;\n let totalCredits = 0;\n\n for (let i = 0; i < results.length; i++) {\n const result = results[i];\n const origIndex = inputs[i]!.origIndex;\n if (!result) {\n failed++;\n failedContents.push(`## ${inputs[i]!.url}\\n\\n\u274C No result returned`);\n continue;\n }\n\n // Binary document detected by content-type \u2014 defer to Jina Reader.\n // These URLs are not counted as failures yet; the handler will re-run\n // them through the document branch and merge results.\n if (result.error?.code === ErrorCode.UNSUPPORTED_BINARY_CONTENT) {\n binaryDeferred.push({\n url: result.url,\n origIndex: urlToIndex.get(result.url) ?? origIndex,\n });\n continue;\n }\n\n if (result.error || result.statusCode < 200 || result.statusCode >= 300) {\n failed++;\n const errorMsg = result.error?.message || result.content || `HTTP ${result.statusCode}`;\n failedContents.push(`## ${result.url}\\n\\n\u274C Failed to scrape: ${errorMsg}`);\n continue;\n }\n\n successful++;\n totalCredits += result.credits;\n\n let content: string;\n try {\n const readable = extractReadableContent(result.content, result.url);\n const sourceForCleaner = readable.extracted ? 
readable.content : result.content;\n content = markdownCleaner.processContent(sourceForCleaner);\n } catch {\n content = result.content;\n }\n\n successItems.push({ url: result.url, content, index: origIndex });\n }\n\n return {\n successItems,\n failedContents,\n metrics: { successful, failed, totalCredits },\n binaryDeferred,\n };\n}\n\n// --- Document branch (Jina Reader) ---\n\nasync function fetchDocumentBranch(\n inputs: BranchInput[],\n jinaClient: JinaClient,\n): Promise<ScrapePhaseResult> {\n if (inputs.length === 0) {\n return { successItems: [], failedContents: [], metrics: { successful: 0, failed: 0, totalCredits: 0 } };\n }\n\n mcpLog(\n 'info',\n `[concurrency] document branch (jina): converting ${inputs.length} URL(s) with limit=${CONCURRENCY.SCRAPER}`,\n 'scrape',\n );\n\n const results = await pMapSettled(\n inputs,\n (input) => jinaClient.convert({ url: input.url }),\n CONCURRENCY.SCRAPER,\n );\n\n const successItems: ProcessedResult[] = [];\n const failedContents: string[] = [];\n let successful = 0;\n let failed = 0;\n\n for (let i = 0; i < results.length; i++) {\n const settled = results[i];\n const input = inputs[i]!;\n if (!settled) {\n failed++;\n failedContents.push(`## ${input.url}\\n\\n\u274C No result returned (document conversion)`);\n continue;\n }\n if (settled.status === 'rejected') {\n failed++;\n const reason = settled.reason instanceof Error ? settled.reason.message : String(settled.reason);\n failedContents.push(`## ${input.url}\\n\\n\u274C Document conversion failed: ${reason}`);\n continue;\n }\n\n const result = settled.value;\n if (result.error || result.statusCode < 200 || result.statusCode >= 300) {\n failed++;\n const errorMsg = result.error?.message || `HTTP ${result.statusCode}`;\n failedContents.push(`## ${input.url}\\n\\n\u274C Document conversion failed: ${errorMsg}`);\n continue;\n }\n\n successful++;\n successItems.push({ url: input.url, content: result.content, index: input.origIndex });\n }\n\n return { successItems, failedContents, metrics: { successful, failed, totalCredits: 0 } };\n}\n\n// --- Reddit branch ---\n\nfunction formatRedditPostAsMarkdown(result: PostResult): string {\n const { post, comments } = result;\n const lines: string[] = [];\n lines.push(`# ${post.title}`);\n lines.push('');\n lines.push(`**r/${post.subreddit}** \u2022 u/${post.author} \u2022 \u2B06\uFE0F ${post.score} \u2022 \uD83D\uDCAC ${post.commentCount} comments`);\n lines.push(`\uD83D\uDD17 ${post.url}`);\n lines.push('');\n if (post.body) {\n lines.push('## Post content');\n lines.push('');\n lines.push(post.body);\n lines.push('');\n }\n if (comments.length > 0) {\n lines.push(`## Top comments (${comments.length} total)`);\n lines.push('');\n for (const c of comments) {\n const indent = ' '.repeat(c.depth);\n const op = c.isOP ? ' **[OP]**' : '';\n const score = c.score >= 0 ? `+${c.score}` : `${c.score}`;\n lines.push(`${indent}- **u/${c.author}**${op} _(${score})_`);\n for (const line of c.body.split('\\n')) {\n lines.push(`${indent} ${line}`);\n }\n lines.push('');\n }\n }\n return lines.join('\\n');\n}\n\nasync function fetchRedditBranch(inputs: BranchInput[]): Promise<ScrapePhaseResult> {\n if (inputs.length === 0) {\n return { successItems: [], failedContents: [], metrics: { successful: 0, failed: 0, totalCredits: 0 } };\n }\n\n const env = parseEnv();\n if (!env.REDDIT_CLIENT_ID || !env.REDDIT_CLIENT_SECRET) {\n const failedContents = inputs.map(\n (i) => `## ${i.url}\\n\\n\u274C Reddit URL detected, but Reddit API is not configured. 
Set \\`REDDIT_CLIENT_ID\\` and \\`REDDIT_CLIENT_SECRET\\` in the server env to enable threaded Reddit scraping.`,\n );\n return {\n successItems: [],\n failedContents,\n metrics: { successful: 0, failed: inputs.length, totalCredits: 0 },\n };\n }\n\n // Warn for non-permalink Reddit URLs (subreddit homepages, /new, /top, /hot,\n // user profiles). The Reddit API path we call requires /r/.../comments/... \u2014\n // reject upfront so the caller sees a helpful message instead of a 404.\n const [postInputs, nonPermalinks] = inputs.reduce<[BranchInput[], BranchInput[]]>(\n ([posts, rest], input) => {\n if (isRedditPostPermalink(input.url)) posts.push(input);\n else rest.push(input);\n return [posts, rest];\n },\n [[], []],\n );\n\n const nonPermalinkFailed = nonPermalinks.map(\n (i) => `## ${i.url}\\n\\n\u274C Only Reddit post permalinks (/r/<sub>/comments/<id>/...) are supported. Use web-search with scope:\"reddit\" to discover post permalinks first.`,\n );\n\n if (postInputs.length === 0) {\n return {\n successItems: [],\n failedContents: nonPermalinkFailed,\n metrics: { successful: 0, failed: nonPermalinks.length, totalCredits: 0 },\n };\n }\n\n mcpLog('info', `[concurrency] reddit branch: fetching ${postInputs.length} post(s) with limit=${CONCURRENCY.REDDIT}`, 'scrape');\n const client = new RedditClient(env.REDDIT_CLIENT_ID, env.REDDIT_CLIENT_SECRET);\n const urls = postInputs.map((i) => i.url);\n const batchResult = await client.batchGetPosts(urls, true);\n const urlToIndex = new Map(postInputs.map((i) => [i.url, i.origIndex]));\n\n const successItems: ProcessedResult[] = [];\n const failedContents: string[] = [...nonPermalinkFailed];\n let successful = 0;\n let failed = nonPermalinks.length;\n\n for (const [url, result] of batchResult.results) {\n const origIndex = urlToIndex.get(url) ?? -1;\n if (result instanceof Error) {\n failed++;\n failedContents.push(`## ${url}\\n\\n\u274C Reddit fetch failed: ${result.message}`);\n continue;\n }\n successful++;\n successItems.push({ url, content: formatRedditPostAsMarkdown(result), index: origIndex });\n }\n\n return { successItems, failedContents, metrics: { successful, failed, totalCredits: 0 } };\n}\n\n// --- LLM extraction (shared by both branches) ---\n\nasync function processItemsWithLlm(\n successItems: ProcessedResult[],\n enhancedInstruction: string,\n llmProcessor: ReturnType<typeof createLLMProcessor>,\n reporter: ToolReporter,\n): Promise<{ items: ProcessedResult[]; llmErrors: number; llmAttempted: number }> {\n let llmErrors = 0;\n\n if (!llmProcessor || successItems.length === 0) {\n if (!llmProcessor && successItems.length > 0) {\n mcpLog('warning', 'LLM unavailable (LLM_API_KEY not set). 
Returning raw scraped content.', 'scrape');\n void reporter.log('warning', 'llm_extractor_unreachable: planner not configured; raw scraped content returned');\n }\n return { items: successItems, llmErrors, llmAttempted: 0 };\n }\n\n mcpLog('info', `[concurrency] llm extraction: fanning out ${successItems.length} item(s) with limit=${CONCURRENCY.LLM_EXTRACTION}`, 'scrape');\n\n const llmResults = await pMap(\n successItems,\n async (item) => {\n mcpLog('debug', `LLM extracting ${item.url}...`, 'scrape');\n\n const llmResult = await processContentWithLLM(\n item.content,\n { enabled: true, extract: enhancedInstruction, url: item.url },\n llmProcessor,\n );\n\n if (llmResult.processed) {\n return { ...item, content: llmResult.content };\n }\n\n llmErrors++;\n mcpLog('warning', `LLM extraction failed for ${item.url}: ${llmResult.error || 'unknown reason'}`, 'scrape');\n void reporter.log('warning', `llm_extractor_unreachable: ${item.url} \u2014 ${llmResult.error || 'unknown reason'}`);\n return item;\n },\n CONCURRENCY.LLM_EXTRACTION,\n );\n\n return { items: llmResults, llmErrors, llmAttempted: successItems.length };\n}\n\n// --- Output assembly ---\n\nfunction assembleContentEntries(successItems: ProcessedResult[], failedContents: string[]): string[] {\n const sorted = [...successItems].sort((a, b) => a.index - b.index);\n const contents = [...failedContents];\n for (const item of sorted) {\n let content = item.content;\n try {\n content = removeMetaTags(content);\n } catch {\n // Use content as-is\n }\n contents.push(`## ${item.url}\\n\\n${content}`);\n }\n return contents;\n}\n\nfunction buildScrapeResponse(\n params: ScrapeLinksParams,\n contents: string[],\n metrics: ScrapeMetrics,\n llmErrors: number,\n executionTime: number,\n llmAccounting: { llmAttempted: number; llmSucceeded: boolean },\n): { content: string; structuredContent: ScrapeLinksOutput } {\n const llmExtras: Record<string, string | number> = {};\n if (llmAccounting.llmAttempted > 0) {\n const ok = llmAccounting.llmAttempted - llmErrors;\n llmExtras['LLM extraction'] = `${ok}/${llmAccounting.llmAttempted} succeeded`;\n if (!llmAccounting.llmSucceeded) {\n llmExtras['LLM credit'] = '0 charged (no extraction produced)';\n }\n } else if (llmErrors > 0) {\n llmExtras['LLM extraction failures'] = llmErrors;\n }\n\n const batchHeader = formatBatchHeader({\n title: `Scraped Content (${params.urls.length} URLs)`,\n totalItems: params.urls.length,\n successful: metrics.successful,\n failed: metrics.failed,\n extras: {\n 'Credits used': metrics.totalCredits,\n ...llmExtras,\n },\n });\n\n const formattedContent = formatSuccess({\n title: 'Scraping Complete',\n summary: batchHeader,\n data: contents.join('\\n\\n---\\n\\n'),\n metadata: {\n 'Execution time': formatDuration(executionTime),\n },\n });\n\n const metadata: ScrapeLinksOutput['metadata'] = {\n total_items: params.urls.length,\n successful: metrics.successful,\n failed: metrics.failed,\n execution_time_ms: executionTime,\n total_credits: metrics.totalCredits,\n };\n return { content: formattedContent, structuredContent: { metadata } };\n}\n\n// --- Handler ---\n\nexport async function handleScrapeLinks(\n params: ScrapeLinksParams,\n reporter: ToolReporter = NOOP_REPORTER,\n): Promise<ToolExecutionResult<ScrapeLinksOutput>> {\n const startTime = Date.now();\n\n if (!params.urls || params.urls.length === 0) {\n return createScrapeErrorResponse('NO_URLS', 'No URLs provided', startTime);\n }\n\n const { webInputs, redditInputs, documentInputs, invalidEntries } = 
partitionUrls(params.urls);\n const validCount = webInputs.length + redditInputs.length + documentInputs.length;\n\n await reporter.log(\n 'info',\n `Partitioned ${params.urls.length} URL(s): ${webInputs.length} web, ${redditInputs.length} reddit, ${documentInputs.length} document, ${invalidEntries.length} invalid`,\n );\n\n if (validCount === 0) {\n return createScrapeErrorResponse(\n 'INVALID_URLS',\n `All ${params.urls.length} URLs are invalid`,\n startTime,\n false,\n [\n 'web-search(queries=[...], extract=\"...\") \u2014 search for valid URLs first, then scrape the results',\n ],\n );\n }\n\n mcpLog(\n 'info',\n `Starting scrape: ${webInputs.length} web + ${redditInputs.length} reddit + ${documentInputs.length} document URL(s)`,\n 'scrape',\n );\n await reporter.progress(15, 100, 'Preparing scraper clients');\n\n // Only initialize the Scrape.do client if we actually have HTML/web URLs.\n // The Jina client is cheap (no auth needed) and always constructed so the\n // document branch and the web\u2192Jina fallback path both work uniformly.\n let clients: ScrapeClients | null = null;\n try {\n const jinaClient = new JinaClient();\n if (webInputs.length > 0) {\n clients = {\n client: new ScraperClient(),\n jinaClient,\n llmProcessor: createLLMProcessor(),\n };\n } else {\n clients = {\n client: null as unknown as ScraperClient,\n jinaClient,\n llmProcessor: createLLMProcessor(),\n };\n }\n } catch (error) {\n const err = classifyError(error);\n return createScrapeErrorResponse(\n 'CLIENT_INIT_FAILED',\n `Failed to initialize scraper: ${err.message}`,\n startTime,\n false,\n [\n 'web-search(queries=[\"topic key findings\", \"topic summary\"], extract=\"key findings and summary\") \u2014 search instead of scraping',\n ],\n );\n }\n\n const enhancedInstruction = enhanceExtractionInstruction(params.extract);\n\n await reporter.progress(35, 100, 'Fetching page content');\n\n // Phase 1 \u2014 run all three branches in parallel. Failures in one branch do\n // not block the others. The web branch may surface binary-content URLs via\n // `binaryDeferred`, which are re-routed through Jina in Phase 2.\n const emptyPhase: WebPhaseResult = {\n successItems: [], failedContents: [],\n metrics: { successful: 0, failed: 0, totalCredits: 0 },\n binaryDeferred: [],\n };\n const [webPhase, redditPhase, documentPhase] = await Promise.all([\n webInputs.length > 0\n ? 
fetchWebBranch(webInputs, clients.client)\n : Promise.resolve<WebPhaseResult>(emptyPhase),\n fetchRedditBranch(redditInputs),\n fetchDocumentBranch(documentInputs, clients.jinaClient),\n ]);\n\n // Phase 2 \u2014 fallback for URLs that Scrape.do reported as binary.\n let deferredPhase: ScrapePhaseResult = {\n successItems: [], failedContents: [],\n metrics: { successful: 0, failed: 0, totalCredits: 0 },\n };\n if (webPhase.binaryDeferred.length > 0) {\n await reporter.log(\n 'info',\n `Rerouting ${webPhase.binaryDeferred.length} binary URL(s) from Scrape.do \u2192 Jina Reader`,\n );\n deferredPhase = await fetchDocumentBranch(webPhase.binaryDeferred, clients.jinaClient);\n }\n\n const successItems = [\n ...webPhase.successItems,\n ...redditPhase.successItems,\n ...documentPhase.successItems,\n ...deferredPhase.successItems,\n ];\n const invalidFailed = invalidEntries.map(\n ({ url }) => `## ${url}\\n\\n\u274C Invalid URL format`,\n );\n const failedContents = [\n ...invalidFailed,\n ...webPhase.failedContents,\n ...redditPhase.failedContents,\n ...documentPhase.failedContents,\n ...deferredPhase.failedContents,\n ];\n const metrics: ScrapeMetrics = {\n successful:\n webPhase.metrics.successful\n + redditPhase.metrics.successful\n + documentPhase.metrics.successful\n + deferredPhase.metrics.successful,\n failed:\n invalidEntries.length\n + webPhase.metrics.failed\n + redditPhase.metrics.failed\n + documentPhase.metrics.failed\n + deferredPhase.metrics.failed,\n totalCredits: webPhase.metrics.totalCredits,\n };\n\n await reporter.log('info', `Fetched ${metrics.successful} page(s), ${metrics.failed} failed`);\n\n if (successItems.length > 0) {\n await reporter.progress(80, 100, 'Running LLM extraction over fetched pages');\n }\n\n const { items: processedItems, llmErrors, llmAttempted } = await processItemsWithLlm(\n successItems,\n enhancedInstruction,\n clients.llmProcessor,\n reporter,\n );\n\n const contents = assembleContentEntries(processedItems, failedContents);\n const executionTime = Date.now() - startTime;\n\n mcpLog(\n 'info',\n `Completed: ${metrics.successful} successful, ${metrics.failed} failed, ${metrics.totalCredits} credits used`,\n 'scrape',\n );\n\n const llmSucceeded = llmAttempted > 0 && llmErrors < llmAttempted;\n const result = buildScrapeResponse(\n params,\n contents,\n metrics,\n llmErrors,\n executionTime,\n { llmAttempted, llmSucceeded },\n );\n\n if (metrics.successful === 0 && metrics.failed > 0) {\n return toolFailure(result.content);\n }\n\n return toolSuccess(result.content, result.structuredContent);\n}\n\nexport function registerScrapeLinksTool(server: MCPServer): void {\n server.tool(\n {\n name: 'scrape-links',\n title: 'Scrape Links',\n description:\n 'Fetch many URLs in parallel and run per-URL structured LLM extraction. Auto-detects reddit.com post permalinks and routes them through the Reddit API (threaded post + comments); everything else flows through the HTTP scraper. Safe to call in parallel \u2014 group URLs by context rather than jamming unrelated batches together. Each page returns `## Source`, `## Matches` (verbatim-preserved facts), `## Not found` (explicit gaps), and `## Follow-up signals` (new terms + referenced URLs) that feed the next research loop. Describe the SHAPE of what you want in `extract`, facets separated by `|` (e.g. 
`root cause | affected versions | fix | workarounds | timeline`).',\n schema: scrapeLinksParamsSchema,\n outputSchema: scrapeLinksOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: true,\n },\n },\n async (args, ctx) => {\n if (!getCapabilities().scraping) {\n return toToolResponse(toolFailure(getMissingEnvMessage('scraping')));\n }\n\n const reporter = createToolReporter(ctx, 'scrape-links');\n const result = await handleScrapeLinks(args, reporter);\n\n await reporter.progress(100, 100, result.isError ? 'Scrape failed' : 'Scrape complete');\n return toToolResponse(result);\n },\n );\n}\n"],
- "mappings": "AAYA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP,SAAS,qBAAqB;AAC9B,SAAS,oBAAqC;AAC9C,SAAS,kBAAkB;AAC3B,SAAS,uBAAuB;AAChC,SAAS,oBAAoB,6BAA6B;AAC1D,SAAS,sBAAsB;AAC/B,SAAS,8BAA8B;AACvC,SAAS,eAAe,iBAAiB;AACzC,SAAS,qBAAqB;AAC9B,SAAS,MAAM,mBAAmB;AAClC;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAGK;AAEP,MAAM,kBAAkB,IAAI,gBAAgB;AAE5C,SAAS,6BAA6B,aAAyC;AAC7E,QAAM,OAAO,eAAe;AAC5B,SAAO,GAAG,QAAQ,iBAAiB;AAAA;AAAA,EAAO,IAAI;AAAA;AAAA,EAAO,QAAQ,iBAAiB;AAChF;AAwCA,MAAM,cAAc;AACpB,MAAM,wBAAwB;AAE9B,SAAS,YAAY,KAAsB;AACzC,MAAI;AACF,UAAM,IAAI,IAAI,IAAI,GAAG;AACrB,WAAO,YAAY,KAAK,EAAE,QAAQ;AAAA,EACpC,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAEA,SAAS,sBAAsB,KAAsB;AACnD,MAAI;AACF,UAAM,IAAI,IAAI,IAAI,GAAG;AACrB,WAAO,YAAY,KAAK,EAAE,QAAQ,KAAK,sBAAsB,KAAK,EAAE,QAAQ;AAAA,EAC9E,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAIA,SAAS,0BACP,MACA,SACA,WACA,YAAY,OACZ,cACwC;AACxC,SAAO;AAAA,IACL,GAAG,YAAY;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU;AAAA,MACV,UAAU,SAAS,YAAY,CAAC,gCAAgC,IAAI;AAAA,MACpE;AAAA,IACF,CAAC,CAAC;AAAA;AAAA,kBAAuB,eAAe,KAAK,IAAI,IAAI,SAAS,CAAC;AAAA,EACjE;AACF;AAWA,SAAS,cAAc,MAAiC;AACtD,QAAM,YAA2B,CAAC;AAClC,QAAM,eAA8B,CAAC;AACrC,QAAM,iBAAgC,CAAC;AACvC,QAAM,iBAAuD,CAAC;AAE9D,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,MAAM,KAAK,CAAC;AAClB,QAAI;AACF,UAAI,IAAI,GAAG;AAAA,IACb,QAAQ;AACN,qBAAe,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AACzC;AAAA,IACF;AAKA,QAAI,cAAc,GAAG,GAAG;AACtB,qBAAe,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AAAA,IAC3C,WAAW,YAAY,GAAG,GAAG;AAC3B,mBAAa,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AAAA,IACzC,OAAO;AACL,gBAAU,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AAAA,IACtC;AAAA,EACF;AAEA,SAAO,EAAE,WAAW,cAAc,gBAAgB,eAAe;AACnE;AAIA,eAAe,eACb,QACA,QACyB;AACzB,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO;AAAA,MACL,cAAc,CAAC;AAAA,MACf,gBAAgB,CAAC;AAAA,MACjB,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE;AAAA,MACrD,gBAAgB,CAAC;AAAA,IACnB;AAAA,EACF;AAEA,SAAO,QAAQ,yCAAyC,OAAO,MAAM,sBAAsB,YAAY,OAAO,IAAI,QAAQ;AAC1H,QAAM,OAAO,OAAO,IAAI,CAAC,MAAM,EAAE,GAAG;AACpC,QAAM,UAAU,MAAM,OAAO,eAAe,MAAM,EAAE,SAAS,GAAG,CAAC;AACjE,QAAM,aAAa,IAAI,IAAI,OAAO,IAAI,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,SAAS,CAAC,CAAC;AAElE,QAAM,eAAkC,CAAC;AACzC,QAAM,iBAA2B,CAAC;AAClC,QAAM,iBAAgC,CAAC;AACvC,MAAI,aAAa;AACjB,MAAI,SAAS;AACb,MAAI,eAAe;AAEnB,WAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,UAAM,SAAS,QAAQ,CAAC;AACxB,UAAM,YAAY,OAAO,CAAC,EAAG;AAC7B,QAAI,CAAC,QAAQ;AACX;AACA,qBAAe,KAAK,MAAM,OAAO,CAAC,EAAG,GAAG;AAAA;AAAA,0BAA0B;AAClE;AAAA,IACF;AAKA,QAAI,OAAO,OAAO,SAAS,UAAU,4BAA4B;AAC/D,qBAAe,KAAK;AAAA,QAClB,KAAK,OAAO;AAAA,QACZ,WAAW,WAAW,IAAI,OAAO,GAAG,KAAK;AAAA,MAC3C,CAAC;AACD;AAAA,IACF;AAEA,QAAI,OAAO,SAAS,OAAO,aAAa,OAAO,OAAO,cAAc,KAAK;AACvE;AACA,YAAM,WAAW,OAAO,OAAO,WAAW,OAAO,WAAW,QAAQ,OAAO,UAAU;AACrF,qBAAe,KAAK,MAAM,OAAO,GAAG;AAAA;AAAA,2BAA2B,QAAQ,EAAE;AACzE;AAAA,IACF;AAEA;AACA,oBAAgB,OAAO;AAEvB,QAAI;AACJ,QAAI;AACF,YAAM,WAAW,uBAAuB,OAAO,SAAS,OAAO,GAAG;AAClE,YAAM,mBAAmB,SAAS,YAAY,SAAS,UAAU,OAAO;AACxE,gBAAU,gBAAgB,eAAe,gBAAgB;AAAA,IAC3D,QAAQ;AACN,gBAAU,OAAO;AAAA,IACnB;AAEA,iBAAa,KAAK,EAAE,KAAK,OAAO,KAAK,SAAS,OAAO,UAAU,CAAC;AAAA,EAClE;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,SAAS,EAAE,YAAY,QAAQ,aAAa;AAAA,IAC5C;AAAA,EACF;AACF;AAIA,eAAe,oBACb,QACA,YAC4B;AAC5B,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO,EAAE,cAAc,CAAC,GAAG,gBAAgB,CAAC,GAAG,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE,EAAE;AAAA,EACxG;AAEA;AAAA,IACE;AAAA,IACA,oDAAoD,OAAO,MAAM,sBAAsB,YAAY,OAAO;AAAA,IAC1G;AAAA,EACF;AAEA,QAAM,UAAU,MAAM;AAAA,IACpB;AAAA,IACA,CAAC,UAAU,WAAW,QAAQ,EAAE,KAAK,MAAM,IAAI,CAAC;AAAA,IAChD,YAAY;AAAA,EACd;AAEA,QAAM,eAAkC,CAAC;AACzC,QA
AM,iBAA2B,CAAC;AAClC,MAAI,aAAa;AACjB,MAAI,SAAS;AAEb,WAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,UAAM,UAAU,QAAQ,CAAC;AACzB,UAAM,QAAQ,OAAO,CAAC;AACtB,QAAI,CAAC,SAAS;AACZ;AACA,qBAAe,KAAK,MAAM,MAAM,GAAG;AAAA;AAAA,gDAAgD;AACnF;AAAA,IACF;AACA,QAAI,QAAQ,WAAW,YAAY;AACjC;AACA,YAAM,SAAS,QAAQ,kBAAkB,QAAQ,QAAQ,OAAO,UAAU,OAAO,QAAQ,MAAM;AAC/F,qBAAe,KAAK,MAAM,MAAM,GAAG;AAAA;AAAA,qCAAqC,MAAM,EAAE;AAChF;AAAA,IACF;AAEA,UAAM,SAAS,QAAQ;AACvB,QAAI,OAAO,SAAS,OAAO,aAAa,OAAO,OAAO,cAAc,KAAK;AACvE;AACA,YAAM,WAAW,OAAO,OAAO,WAAW,QAAQ,OAAO,UAAU;AACnE,qBAAe,KAAK,MAAM,MAAM,GAAG;AAAA;AAAA,qCAAqC,QAAQ,EAAE;AAClF;AAAA,IACF;AAEA;AACA,iBAAa,KAAK,EAAE,KAAK,MAAM,KAAK,SAAS,OAAO,SAAS,OAAO,MAAM,UAAU,CAAC;AAAA,EACvF;AAEA,SAAO,EAAE,cAAc,gBAAgB,SAAS,EAAE,YAAY,QAAQ,cAAc,EAAE,EAAE;AAC1F;AAIA,SAAS,2BAA2B,QAA4B;AAC9D,QAAM,EAAE,MAAM,SAAS,IAAI;AAC3B,QAAM,QAAkB,CAAC;AACzB,QAAM,KAAK,KAAK,KAAK,KAAK,EAAE;AAC5B,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,OAAO,KAAK,SAAS,eAAU,KAAK,MAAM,wBAAS,KAAK,KAAK,qBAAS,KAAK,YAAY,WAAW;AAC7G,QAAM,KAAK,aAAM,KAAK,GAAG,EAAE;AAC3B,QAAM,KAAK,EAAE;AACb,MAAI,KAAK,MAAM;AACb,UAAM,KAAK,iBAAiB;AAC5B,UAAM,KAAK,EAAE;AACb,UAAM,KAAK,KAAK,IAAI;AACpB,UAAM,KAAK,EAAE;AAAA,EACf;AACA,MAAI,SAAS,SAAS,GAAG;AACvB,UAAM,KAAK,oBAAoB,SAAS,MAAM,SAAS;AACvD,UAAM,KAAK,EAAE;AACb,eAAW,KAAK,UAAU;AACxB,YAAM,SAAS,KAAK,OAAO,EAAE,KAAK;AAClC,YAAM,KAAK,EAAE,OAAO,cAAc;AAClC,YAAM,QAAQ,EAAE,SAAS,IAAI,IAAI,EAAE,KAAK,KAAK,GAAG,EAAE,KAAK;AACvD,YAAM,KAAK,GAAG,MAAM,SAAS,EAAE,MAAM,KAAK,EAAE,MAAM,KAAK,IAAI;AAC3D,iBAAW,QAAQ,EAAE,KAAK,MAAM,IAAI,GAAG;AACrC,cAAM,KAAK,GAAG,MAAM,KAAK,IAAI,EAAE;AAAA,MACjC;AACA,YAAM,KAAK,EAAE;AAAA,IACf;AAAA,EACF;AACA,SAAO,MAAM,KAAK,IAAI;AACxB;AAEA,eAAe,kBAAkB,QAAmD;AAClF,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO,EAAE,cAAc,CAAC,GAAG,gBAAgB,CAAC,GAAG,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE,EAAE;AAAA,EACxG;AAEA,QAAM,MAAM,SAAS;AACrB,MAAI,CAAC,IAAI,oBAAoB,CAAC,IAAI,sBAAsB;AACtD,UAAMA,kBAAiB,OAAO;AAAA,MAC5B,CAAC,MAAM,MAAM,EAAE,GAAG;AAAA;AAAA;AAAA,IACpB;AACA,WAAO;AAAA,MACL,cAAc,CAAC;AAAA,MACf,gBAAAA;AAAA,MACA,SAAS,EAAE,YAAY,GAAG,QAAQ,OAAO,QAAQ,cAAc,EAAE;AAAA,IACnE;AAAA,EACF;AAKA,QAAM,CAAC,YAAY,aAAa,IAAI,OAAO;AAAA,IACzC,CAAC,CAAC,OAAO,IAAI,GAAG,UAAU;AACxB,UAAI,sBAAsB,MAAM,GAAG,EAAG,OAAM,KAAK,KAAK;AAAA,UACjD,MAAK,KAAK,KAAK;AACpB,aAAO,CAAC,OAAO,IAAI;AAAA,IACrB;AAAA,IACA,CAAC,CAAC,GAAG,CAAC,CAAC;AAAA,EACT;AAEA,QAAM,qBAAqB,cAAc;AAAA,IACvC,CAAC,MAAM,MAAM,EAAE,GAAG;AAAA;AAAA;AAAA,EACpB;AAEA,MAAI,WAAW,WAAW,GAAG;AAC3B,WAAO;AAAA,MACL,cAAc,CAAC;AAAA,MACf,gBAAgB;AAAA,MAChB,SAAS,EAAE,YAAY,GAAG,QAAQ,cAAc,QAAQ,cAAc,EAAE;AAAA,IAC1E;AAAA,EACF;AAEA,SAAO,QAAQ,yCAAyC,WAAW,MAAM,uBAAuB,YAAY,MAAM,IAAI,QAAQ;AAC9H,QAAM,SAAS,IAAI,aAAa,IAAI,kBAAkB,IAAI,oBAAoB;AAC9E,QAAM,OAAO,WAAW,IAAI,CAAC,MAAM,EAAE,GAAG;AACxC,QAAM,cAAc,MAAM,OAAO,cAAc,MAAM,IAAI;AACzD,QAAM,aAAa,IAAI,IAAI,WAAW,IAAI,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,SAAS,CAAC,CAAC;AAEtE,QAAM,eAAkC,CAAC;AACzC,QAAM,iBAA2B,CAAC,GAAG,kBAAkB;AACvD,MAAI,aAAa;AACjB,MAAI,SAAS,cAAc;AAE3B,aAAW,CAAC,KAAK,MAAM,KAAK,YAAY,SAAS;AAC/C,UAAM,YAAY,WAAW,IAAI,GAAG,KAAK;AACzC,QAAI,kBAAkB,OAAO;AAC3B;AACA,qBAAe,KAAK,MAAM,GAAG;AAAA;AAAA,8BAA8B,OAAO,OAAO,EAAE;AAC3E;AAAA,IACF;AACA;AACA,iBAAa,KAAK,EAAE,KAAK,SAAS,2BAA2B,MAAM,GAAG,OAAO,UAAU,CAAC;AAAA,EAC1F;AAEA,SAAO,EAAE,cAAc,gBAAgB,SAAS,EAAE,YAAY,QAAQ,cAAc,EAAE,EAAE;AAC1F;AAIA,eAAe,oBACb,cACA,qBACA,cACA,UACgF;AAChF,MAAI,YAAY;AAEhB,MAAI,CAAC,gBAAgB,aAAa,WAAW,GAAG;AAC9C,QAAI,CAAC,gBAAgB,aAAa,SAAS,GAAG;AAC5C,aAAO,WAAW,yEAAyE,QAAQ;AACnG,WAAK,SAAS,IAAI,WAAW,iFAAiF;AAAA,IAChH;AACA,WAAO,EAAE,OAAO,cAAc,WAAW,cAAc,EAAE;AAAA,EAC3D;AAEA,SAAO,QAAQ,6CAA6C,aAAa,MAAM,uBAAuB,YAAY,cAAc,IAAI,QAAQ;AAE5I,QAAM,aAAa,MAAM;AAAA,IACvB;AAAA,IACA,OAAO,SAAS;AA
Cd,aAAO,SAAS,kBAAkB,KAAK,GAAG,OAAO,QAAQ;AAEzD,YAAM,YAAY,MAAM;AAAA,QACtB,KAAK;AAAA,QACL,EAAE,SAAS,MAAM,SAAS,qBAAqB,KAAK,KAAK,IAAI;AAAA,QAC7D;AAAA,MACF;AAEA,UAAI,UAAU,WAAW;AACvB,eAAO,EAAE,GAAG,MAAM,SAAS,UAAU,QAAQ;AAAA,MAC/C;AAEA;AACA,aAAO,WAAW,6BAA6B,KAAK,GAAG,KAAK,UAAU,SAAS,gBAAgB,IAAI,QAAQ;AAC3G,WAAK,SAAS,IAAI,WAAW,8BAA8B,KAAK,GAAG,WAAM,UAAU,SAAS,gBAAgB,EAAE;AAC9G,aAAO;AAAA,IACT;AAAA,IACA,YAAY;AAAA,EACd;AAEA,SAAO,EAAE,OAAO,YAAY,WAAW,cAAc,aAAa,OAAO;AAC3E;AAIA,SAAS,uBAAuB,cAAiC,gBAAoC;AACnG,QAAM,SAAS,CAAC,GAAG,YAAY,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AACjE,QAAM,WAAW,CAAC,GAAG,cAAc;AACnC,aAAW,QAAQ,QAAQ;AACzB,QAAI,UAAU,KAAK;AACnB,QAAI;AACF,gBAAU,eAAe,OAAO;AAAA,IAClC,QAAQ;AAAA,IAER;AACA,aAAS,KAAK,MAAM,KAAK,GAAG;AAAA;AAAA,EAAO,OAAO,EAAE;AAAA,EAC9C;AACA,SAAO;AACT;AAEA,SAAS,oBACP,QACA,UACA,SACA,WACA,eACA,eAC2D;AAC3D,QAAM,YAA6C,CAAC;AACpD,MAAI,cAAc,eAAe,GAAG;AAClC,UAAM,KAAK,cAAc,eAAe;AACxC,cAAU,gBAAgB,IAAI,GAAG,EAAE,IAAI,cAAc,YAAY;AACjE,QAAI,CAAC,cAAc,cAAc;AAC/B,gBAAU,YAAY,IAAI;AAAA,IAC5B;AAAA,EACF,WAAW,YAAY,GAAG;AACxB,cAAU,yBAAyB,IAAI;AAAA,EACzC;AAEA,QAAM,cAAc,kBAAkB;AAAA,IACpC,OAAO,oBAAoB,OAAO,KAAK,MAAM;AAAA,IAC7C,YAAY,OAAO,KAAK;AAAA,IACxB,YAAY,QAAQ;AAAA,IACpB,QAAQ,QAAQ;AAAA,IAChB,QAAQ;AAAA,MACN,gBAAgB,QAAQ;AAAA,MACxB,GAAG;AAAA,IACL;AAAA,EACF,CAAC;AAED,QAAM,mBAAmB,cAAc;AAAA,IACrC,OAAO;AAAA,IACP,SAAS;AAAA,IACT,MAAM,SAAS,KAAK,aAAa;AAAA,IACjC,UAAU;AAAA,MACR,kBAAkB,eAAe,aAAa;AAAA,IAChD;AAAA,EACF,CAAC;AAED,QAAM,WAA0C;AAAA,IAC9C,aAAa,OAAO,KAAK;AAAA,IACzB,YAAY,QAAQ;AAAA,IACpB,QAAQ,QAAQ;AAAA,IAChB,mBAAmB;AAAA,IACnB,eAAe,QAAQ;AAAA,EACzB;AACA,SAAO,EAAE,SAAS,kBAAkB,mBAAmB,EAAE,SAAS,EAAE;AACtE;AAIA,eAAsB,kBACpB,QACA,WAAyB,eACwB;AACjD,QAAM,YAAY,KAAK,IAAI;AAE3B,MAAI,CAAC,OAAO,QAAQ,OAAO,KAAK,WAAW,GAAG;AAC5C,WAAO,0BAA0B,WAAW,oBAAoB,SAAS;AAAA,EAC3E;AAEA,QAAM,EAAE,WAAW,cAAc,gBAAgB,eAAe,IAAI,cAAc,OAAO,IAAI;AAC7F,QAAM,aAAa,UAAU,SAAS,aAAa,SAAS,eAAe;AAE3E,QAAM,SAAS;AAAA,IACb;AAAA,IACA,eAAe,OAAO,KAAK,MAAM,YAAY,UAAU,MAAM,SAAS,aAAa,MAAM,YAAY,eAAe,MAAM,cAAc,eAAe,MAAM;AAAA,EAC/J;AAEA,MAAI,eAAe,GAAG;AACpB,WAAO;AAAA,MACL;AAAA,MACA,OAAO,OAAO,KAAK,MAAM;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,QACE;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA;AAAA,IACE;AAAA,IACA,oBAAoB,UAAU,MAAM,UAAU,aAAa,MAAM,aAAa,eAAe,MAAM;AAAA,IACnG;AAAA,EACF;AACA,QAAM,SAAS,SAAS,IAAI,KAAK,2BAA2B;AAK5D,MAAI,UAAgC;AACpC,MAAI;AACF,UAAM,aAAa,IAAI,WAAW;AAClC,QAAI,UAAU,SAAS,GAAG;AACxB,gBAAU;AAAA,QACR,QAAQ,IAAI,cAAc;AAAA,QAC1B;AAAA,QACA,cAAc,mBAAmB;AAAA,MACnC;AAAA,IACF,OAAO;AACL,gBAAU;AAAA,QACR,QAAQ;AAAA,QACR;AAAA,QACA,cAAc,mBAAmB;AAAA,MACnC;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,UAAM,MAAM,cAAc,KAAK;AAC/B,WAAO;AAAA,MACL;AAAA,MACA,iCAAiC,IAAI,OAAO;AAAA,MAC5C;AAAA,MACA;AAAA,MACA;AAAA,QACE;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,sBAAsB,6BAA6B,OAAO,OAAO;AAEvE,QAAM,SAAS,SAAS,IAAI,KAAK,uBAAuB;AAKxD,QAAM,aAA6B;AAAA,IACjC,cAAc,CAAC;AAAA,IAAG,gBAAgB,CAAC;AAAA,IACnC,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE;AAAA,IACrD,gBAAgB,CAAC;AAAA,EACnB;AACA,QAAM,CAAC,UAAU,aAAa,aAAa,IAAI,MAAM,QAAQ,IAAI;AAAA,IAC/D,UAAU,SAAS,IACf,eAAe,WAAW,QAAQ,MAAM,IACxC,QAAQ,QAAwB,UAAU;AAAA,IAC9C,kBAAkB,YAAY;AAAA,IAC9B,oBAAoB,gBAAgB,QAAQ,UAAU;AAAA,EACxD,CAAC;AAGD,MAAI,gBAAmC;AAAA,IACrC,cAAc,CAAC;AAAA,IAAG,gBAAgB,CAAC;AAAA,IACnC,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE;AAAA,EACvD;AACA,MAAI,SAAS,eAAe,SAAS,GAAG;AACtC,UAAM,SAAS;AAAA,MACb;AAAA,MACA,aAAa,SAAS,eAAe,MAAM;AAAA,IAC7C;AACA,oBAAgB,MAAM,oBAAoB,SAAS,gBAAgB,QAAQ,UAAU;AAAA,EACvF;AAEA,QAAM,eAAe;AAAA,IACnB,GAAG,SAAS;AAAA,IACZ,GAAG,YAAY;AAAA,IACf,GAAG,cAAc;AAAA,IACjB,GAAG,cAAc;AAAA,EACnB;AACA,QAAM,gBAAgB,eAAe;AAAA,IACnC,CAAC,EAAE,IAAI,MAAM,MAAM,GAAG;AAAA;AAAA;AAAA,EACxB
;AACA,QAAM,iBAAiB;AAAA,IACrB,GAAG;AAAA,IACH,GAAG,SAAS;AAAA,IACZ,GAAG,YAAY;AAAA,IACf,GAAG,cAAc;AAAA,IACjB,GAAG,cAAc;AAAA,EACnB;AACA,QAAM,UAAyB;AAAA,IAC7B,YACE,SAAS,QAAQ,aACf,YAAY,QAAQ,aACpB,cAAc,QAAQ,aACtB,cAAc,QAAQ;AAAA,IAC1B,QACE,eAAe,SACb,SAAS,QAAQ,SACjB,YAAY,QAAQ,SACpB,cAAc,QAAQ,SACtB,cAAc,QAAQ;AAAA,IAC1B,cAAc,SAAS,QAAQ;AAAA,EACjC;AAEA,QAAM,SAAS,IAAI,QAAQ,WAAW,QAAQ,UAAU,aAAa,QAAQ,MAAM,SAAS;AAE5F,MAAI,aAAa,SAAS,GAAG;AAC3B,UAAM,SAAS,SAAS,IAAI,KAAK,2CAA2C;AAAA,EAC9E;AAEA,QAAM,EAAE,OAAO,gBAAgB,WAAW,aAAa,IAAI,MAAM;AAAA,IAC/D;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,EACF;AAEA,QAAM,WAAW,uBAAuB,gBAAgB,cAAc;AACtE,QAAM,gBAAgB,KAAK,IAAI,IAAI;AAEnC;AAAA,IACE;AAAA,IACA,cAAc,QAAQ,UAAU,gBAAgB,QAAQ,MAAM,YAAY,QAAQ,YAAY;AAAA,IAC9F;AAAA,EACF;AAEA,QAAM,eAAe,eAAe,KAAK,YAAY;AACrD,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,EAAE,cAAc,aAAa;AAAA,EAC/B;AAEA,MAAI,QAAQ,eAAe,KAAK,QAAQ,SAAS,GAAG;AAClD,WAAO,YAAY,OAAO,OAAO;AAAA,EACnC;AAEA,SAAO,YAAY,OAAO,SAAS,OAAO,iBAAiB;AAC7D;AAEO,SAAS,wBAAwB,QAAyB;AAC/D,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,IACF;AAAA,IACA,OAAO,MAAM,QAAQ;AACnB,UAAI,CAAC,gBAAgB,EAAE,UAAU;AAC/B,eAAO,eAAe,YAAY,qBAAqB,UAAU,CAAC,CAAC;AAAA,MACrE;AAEA,YAAM,WAAW,mBAAmB,KAAK,cAAc;AACvD,YAAM,SAAS,MAAM,kBAAkB,MAAM,QAAQ;AAErD,YAAM,SAAS,SAAS,KAAK,KAAK,OAAO,UAAU,kBAAkB,iBAAiB;AACtF,aAAO,eAAe,MAAM;AAAA,IAC9B;AAAA,EACF;AACF;",
+ "sourcesContent": ["/**\n * Scrape Links Tool Handler\n *\n * Scrapes many URLs in parallel. Reddit permalinks (reddit.com/r/.../comments/...)\n * are auto-detected and routed through the Reddit API; all other URLs go through\n * the scraper. Both branches feed the same per-URL LLM extraction pipeline.\n *\n * NEVER throws \u2014 every error is returned as a tool-level failure response.\n */\n\nimport type { MCPServer } from 'mcp-use/server';\n\nimport {\n SCRAPER,\n CONCURRENCY,\n getCapabilities,\n getMissingEnvMessage,\n parseEnv,\n} from '../config/index.js';\nimport {\n scrapeLinksOutputSchema,\n scrapeLinksParamsSchema,\n type ScrapeLinksParams,\n type ScrapeLinksOutput,\n} from '../schemas/scrape-links.js';\nimport { ScraperClient } from '../clients/scraper.js';\nimport { RedditClient, type PostResult } from '../clients/reddit.js';\nimport { JinaClient } from '../clients/jina.js';\nimport { MarkdownCleaner } from '../services/markdown-cleaner.js';\nimport { createLLMProcessor, processContentWithLLM } from '../services/llm-processor.js';\nimport { removeMetaTags } from '../utils/markdown-formatter.js';\nimport { extractReadableContent } from '../utils/content-extractor.js';\nimport { classifyError, ErrorCode } from '../utils/errors.js';\nimport { isDocumentUrl } from '../utils/source-type.js';\nimport { pMap, pMapSettled } from '../utils/concurrency.js';\nimport {\n mcpLog,\n formatSuccess,\n formatError,\n formatBatchHeader,\n formatDuration,\n} from './utils.js';\nimport {\n createToolReporter,\n NOOP_REPORTER,\n toolFailure,\n toolSuccess,\n toToolResponse,\n type ToolExecutionResult,\n type ToolReporter,\n} from './mcp-helpers.js';\n\nconst markdownCleaner = new MarkdownCleaner();\n\nfunction enhanceExtractionInstruction(instruction: string | undefined): string {\n const base = instruction || 'Extract the main content and key information from this page.';\n return `${SCRAPER.EXTRACTION_PREFIX}\\n\\n${base}\\n\\n${SCRAPER.EXTRACTION_SUFFIX}`;\n}\n\n// --- Types ---\n\ninterface ProcessedResult {\n url: string;\n content: string;\n index: number; // original position in params.urls[]\n}\n\ninterface ScrapeMetrics {\n successful: number;\n failed: number;\n totalCredits: number;\n}\n\ninterface ScrapePhaseResult {\n successItems: ProcessedResult[];\n failedContents: string[];\n metrics: ScrapeMetrics;\n}\n\ninterface BranchInput {\n url: string;\n origIndex: number;\n}\n\ninterface ScrapeClients {\n client: ScraperClient;\n jinaClient: JinaClient;\n llmProcessor: ReturnType<typeof createLLMProcessor>;\n}\n\n/**\n * Any URL the web branch decides to hand off to Jina Reader \u2014 either because\n * Scrape.do returned a binary content-type, or because Scrape.do failed\n * outright (non-404 error). 
`scrapeError` is preserved so that, if Jina also\n * fails, the final error message can surface both layers.\n *\n * Genuine 404s are NOT put here \u2014 the URL doesn't exist; Jina won't help.\n */\ninterface JinaFallback {\n url: string;\n origIndex: number;\n reason: 'binary_content' | 'scrape_failed';\n scrapeError?: string;\n}\n\ninterface WebPhaseResult extends ScrapePhaseResult {\n jinaFallbacks: JinaFallback[];\n}\n\n// --- Reddit URL detection ---\n\nconst REDDIT_HOST = /(?:^|\\.)reddit\\.com$/i;\nconst REDDIT_POST_PERMALINK = /\\/r\\/[^/]+\\/comments\\/[a-z0-9]+/i;\n\nfunction isRedditUrl(url: string): boolean {\n try {\n const u = new URL(url);\n return REDDIT_HOST.test(u.hostname);\n } catch {\n return false;\n }\n}\n\nfunction isRedditPostPermalink(url: string): boolean {\n try {\n const u = new URL(url);\n return REDDIT_HOST.test(u.hostname) && REDDIT_POST_PERMALINK.test(u.pathname);\n } catch {\n return false;\n }\n}\n\n// --- Error helper ---\n\nfunction createScrapeErrorResponse(\n code: string,\n message: string,\n startTime: number,\n retryable = false,\n alternatives?: string[],\n): ToolExecutionResult<ScrapeLinksOutput> {\n return toolFailure(\n `${formatError({\n code,\n message,\n retryable,\n toolName: 'scrape-links',\n howToFix: code === 'NO_URLS' ? ['Provide at least one valid URL'] : undefined,\n alternatives,\n })}\\n\\nExecution time: ${formatDuration(Date.now() - startTime)}`,\n );\n}\n\n// --- URL partitioning ---\n\ninterface PartitionedUrls {\n webInputs: BranchInput[];\n redditInputs: BranchInput[];\n documentInputs: BranchInput[];\n invalidEntries: { url: string; origIndex: number }[];\n}\n\nfunction partitionUrls(urls: string[]): PartitionedUrls {\n const webInputs: BranchInput[] = [];\n const redditInputs: BranchInput[] = [];\n const documentInputs: BranchInput[] = [];\n const invalidEntries: { url: string; origIndex: number }[] = [];\n\n for (let i = 0; i < urls.length; i++) {\n const url = urls[i]!;\n try {\n new URL(url);\n } catch {\n invalidEntries.push({ url, origIndex: i });\n continue;\n }\n // Document URLs (.pdf/.docx/.pptx/.xlsx) go straight to Jina Reader \u2014\n // bypassing Scrape.do because it cannot decode binary bodies. 
Ordered\n // before the Reddit check so a hypothetical PDF on a reddit-adjacent host\n // still takes the document path.\n if (isDocumentUrl(url)) {\n documentInputs.push({ url, origIndex: i });\n } else if (isRedditUrl(url)) {\n redditInputs.push({ url, origIndex: i });\n } else {\n webInputs.push({ url, origIndex: i });\n }\n }\n\n return { webInputs, redditInputs, documentInputs, invalidEntries };\n}\n\n// --- Web branch ---\n\nasync function fetchWebBranch(\n inputs: BranchInput[],\n client: ScraperClient,\n): Promise<WebPhaseResult> {\n if (inputs.length === 0) {\n return {\n successItems: [],\n failedContents: [],\n metrics: { successful: 0, failed: 0, totalCredits: 0 },\n jinaFallbacks: [],\n };\n }\n\n mcpLog('info', `[concurrency] web branch: fanning out ${inputs.length} URL(s) with limit=${CONCURRENCY.SCRAPER}`, 'scrape');\n const urls = inputs.map((i) => i.url);\n const results = await client.scrapeMultiple(urls, { timeout: 60 });\n const urlToIndex = new Map(inputs.map((i) => [i.url, i.origIndex]));\n\n const successItems: ProcessedResult[] = [];\n const failedContents: string[] = [];\n const jinaFallbacks: JinaFallback[] = [];\n let successful = 0;\n let failed = 0;\n let totalCredits = 0;\n\n for (let i = 0; i < results.length; i++) {\n const result = results[i];\n const origIndex = inputs[i]!.origIndex;\n if (!result) {\n failed++;\n failedContents.push(`## ${inputs[i]!.url}\\n\\n\u274C No result returned`);\n continue;\n }\n\n // Binary document detected by content-type \u2014 defer to Jina Reader.\n if (result.error?.code === ErrorCode.UNSUPPORTED_BINARY_CONTENT) {\n jinaFallbacks.push({\n url: result.url,\n origIndex: urlToIndex.get(result.url) ?? origIndex,\n reason: 'binary_content',\n });\n continue;\n }\n\n // Scrape.do failure \u2014 only 404s are treated as hard fails (Jina won't\n // help when the page genuinely doesn't exist). Every other failure mode\n // (302 redirect loops, WAF blocks, timeouts, 5xx, service unavailable)\n // gets a second chance through Jina Reader, which uses different IPs\n // and handles many anti-bot surfaces differently.\n const scrapeFailed = Boolean(result.error) || result.statusCode < 200 || result.statusCode >= 300;\n if (scrapeFailed && result.statusCode !== 404) {\n jinaFallbacks.push({\n url: result.url,\n origIndex: urlToIndex.get(result.url) ?? origIndex,\n reason: 'scrape_failed',\n scrapeError: result.error?.message || result.content || `HTTP ${result.statusCode}`,\n });\n continue;\n }\n if (scrapeFailed) {\n failed++;\n failedContents.push(`## ${result.url}\\n\\n\u274C Failed to scrape: HTTP 404 \u2014 Page not found`);\n continue;\n }\n\n successful++;\n totalCredits += result.credits;\n\n let content: string;\n try {\n const readable = extractReadableContent(result.content, result.url);\n const sourceForCleaner = readable.extracted ? readable.content : result.content;\n content = markdownCleaner.processContent(sourceForCleaner);\n } catch {\n content = result.content;\n }\n\n successItems.push({ url: result.url, content, index: origIndex });\n }\n\n return {\n successItems,\n failedContents,\n metrics: { successful, failed, totalCredits },\n jinaFallbacks,\n };\n}\n\n// --- Document branch (Jina Reader) ---\n\n/**\n * Format a Jina-failure line. 
If the URL was deferred here *after* Scrape.do\n * already failed, surface both layers' errors so the caller can see that this\n * isn't just a Jina glitch \u2014 the primary path failed too.\n *\n * Exported for unit testing.\n */\nexport function formatJinaFailure(url: string, jinaError: string, scrapeError?: string): string {\n if (scrapeError) {\n return `## ${url}\\n\\n\u274C Both scrapers failed. Scrape.do: ${scrapeError}. Jina Reader: ${jinaError}.`;\n }\n return `## ${url}\\n\\n\u274C Document conversion failed: ${jinaError}`;\n}\n\nasync function fetchDocumentBranch(\n inputs: BranchInput[],\n jinaClient: JinaClient,\n /** Optional: map url \u2192 original Scrape.do error, for fallback messaging. */\n scrapeErrorContext?: Map<string, string>,\n): Promise<ScrapePhaseResult> {\n if (inputs.length === 0) {\n return { successItems: [], failedContents: [], metrics: { successful: 0, failed: 0, totalCredits: 0 } };\n }\n\n mcpLog(\n 'info',\n `[concurrency] document branch (jina): converting ${inputs.length} URL(s) with limit=${CONCURRENCY.SCRAPER}`,\n 'scrape',\n );\n\n const results = await pMapSettled(\n inputs,\n (input) => jinaClient.convert({ url: input.url }),\n CONCURRENCY.SCRAPER,\n );\n\n const successItems: ProcessedResult[] = [];\n const failedContents: string[] = [];\n let successful = 0;\n let failed = 0;\n\n for (let i = 0; i < results.length; i++) {\n const settled = results[i];\n const input = inputs[i]!;\n const scrapeError = scrapeErrorContext?.get(input.url);\n if (!settled) {\n failed++;\n failedContents.push(formatJinaFailure(input.url, 'No result returned', scrapeError));\n continue;\n }\n if (settled.status === 'rejected') {\n failed++;\n const reason = settled.reason instanceof Error ? settled.reason.message : String(settled.reason);\n failedContents.push(formatJinaFailure(input.url, reason, scrapeError));\n continue;\n }\n\n const result = settled.value;\n if (result.error || result.statusCode < 200 || result.statusCode >= 300) {\n failed++;\n const errorMsg = result.error?.message || `HTTP ${result.statusCode}`;\n failedContents.push(formatJinaFailure(input.url, errorMsg, scrapeError));\n continue;\n }\n\n successful++;\n successItems.push({ url: input.url, content: result.content, index: input.origIndex });\n }\n\n return { successItems, failedContents, metrics: { successful, failed, totalCredits: 0 } };\n}\n\n// --- Reddit branch ---\n\nfunction formatRedditPostAsMarkdown(result: PostResult): string {\n const { post, comments } = result;\n const lines: string[] = [];\n lines.push(`# ${post.title}`);\n lines.push('');\n lines.push(`**r/${post.subreddit}** \u2022 u/${post.author} \u2022 \u2B06\uFE0F ${post.score} \u2022 \uD83D\uDCAC ${post.commentCount} comments`);\n lines.push(`\uD83D\uDD17 ${post.url}`);\n lines.push('');\n if (post.body) {\n lines.push('## Post content');\n lines.push('');\n lines.push(post.body);\n lines.push('');\n }\n if (comments.length > 0) {\n lines.push(`## Top comments (${comments.length} total)`);\n lines.push('');\n for (const c of comments) {\n const indent = ' '.repeat(c.depth);\n const op = c.isOP ? ' **[OP]**' : '';\n const score = c.score >= 0 ? 
`+${c.score}` : `${c.score}`;\n lines.push(`${indent}- **u/${c.author}**${op} _(${score})_`);\n for (const line of c.body.split('\\n')) {\n lines.push(`${indent} ${line}`);\n }\n lines.push('');\n }\n }\n return lines.join('\\n');\n}\n\nasync function fetchRedditBranch(inputs: BranchInput[]): Promise<ScrapePhaseResult> {\n if (inputs.length === 0) {\n return { successItems: [], failedContents: [], metrics: { successful: 0, failed: 0, totalCredits: 0 } };\n }\n\n const env = parseEnv();\n if (!env.REDDIT_CLIENT_ID || !env.REDDIT_CLIENT_SECRET) {\n const failedContents = inputs.map(\n (i) => `## ${i.url}\\n\\n\u274C Reddit URL detected, but Reddit API is not configured. Set \\`REDDIT_CLIENT_ID\\` and \\`REDDIT_CLIENT_SECRET\\` in the server env to enable threaded Reddit scraping.`,\n );\n return {\n successItems: [],\n failedContents,\n metrics: { successful: 0, failed: inputs.length, totalCredits: 0 },\n };\n }\n\n // Warn for non-permalink Reddit URLs (subreddit homepages, /new, /top, /hot,\n // user profiles). The Reddit API path we call requires /r/.../comments/... \u2014\n // reject upfront so the caller sees a helpful message instead of a 404.\n const [postInputs, nonPermalinks] = inputs.reduce<[BranchInput[], BranchInput[]]>(\n ([posts, rest], input) => {\n if (isRedditPostPermalink(input.url)) posts.push(input);\n else rest.push(input);\n return [posts, rest];\n },\n [[], []],\n );\n\n const nonPermalinkFailed = nonPermalinks.map(\n (i) => `## ${i.url}\\n\\n\u274C Only Reddit post permalinks (/r/<sub>/comments/<id>/...) are supported. Use web-search with scope:\"reddit\" to discover post permalinks first.`,\n );\n\n if (postInputs.length === 0) {\n return {\n successItems: [],\n failedContents: nonPermalinkFailed,\n metrics: { successful: 0, failed: nonPermalinks.length, totalCredits: 0 },\n };\n }\n\n mcpLog('info', `[concurrency] reddit branch: fetching ${postInputs.length} post(s) with limit=${CONCURRENCY.REDDIT}`, 'scrape');\n const client = new RedditClient(env.REDDIT_CLIENT_ID, env.REDDIT_CLIENT_SECRET);\n const urls = postInputs.map((i) => i.url);\n const batchResult = await client.batchGetPosts(urls, true);\n const urlToIndex = new Map(postInputs.map((i) => [i.url, i.origIndex]));\n\n const successItems: ProcessedResult[] = [];\n const failedContents: string[] = [...nonPermalinkFailed];\n let successful = 0;\n let failed = nonPermalinks.length;\n\n for (const [url, result] of batchResult.results) {\n const origIndex = urlToIndex.get(url) ?? -1;\n if (result instanceof Error) {\n failed++;\n failedContents.push(`## ${url}\\n\\n\u274C Reddit fetch failed: ${result.message}`);\n continue;\n }\n successful++;\n successItems.push({ url, content: formatRedditPostAsMarkdown(result), index: origIndex });\n }\n\n return { successItems, failedContents, metrics: { successful, failed, totalCredits: 0 } };\n}\n\n// --- LLM extraction (shared by both branches) ---\n\nasync function processItemsWithLlm(\n successItems: ProcessedResult[],\n enhancedInstruction: string,\n llmProcessor: ReturnType<typeof createLLMProcessor>,\n reporter: ToolReporter,\n): Promise<{ items: ProcessedResult[]; llmErrors: number; llmAttempted: number }> {\n let llmErrors = 0;\n\n if (!llmProcessor || successItems.length === 0) {\n if (!llmProcessor && successItems.length > 0) {\n mcpLog('warning', 'LLM unavailable (LLM_API_KEY not set). 
Returning raw scraped content.', 'scrape');\n void reporter.log('warning', 'llm_extractor_unreachable: planner not configured; raw scraped content returned');\n }\n return { items: successItems, llmErrors, llmAttempted: 0 };\n }\n\n mcpLog('info', `[concurrency] llm extraction: fanning out ${successItems.length} item(s) with limit=${CONCURRENCY.LLM_EXTRACTION}`, 'scrape');\n\n const llmResults = await pMap(\n successItems,\n async (item) => {\n mcpLog('debug', `LLM extracting ${item.url}...`, 'scrape');\n\n const llmResult = await processContentWithLLM(\n item.content,\n { enabled: true, extract: enhancedInstruction, url: item.url },\n llmProcessor,\n );\n\n if (llmResult.processed) {\n return { ...item, content: llmResult.content };\n }\n\n llmErrors++;\n mcpLog('warning', `LLM extraction failed for ${item.url}: ${llmResult.error || 'unknown reason'}`, 'scrape');\n void reporter.log('warning', `llm_extractor_unreachable: ${item.url} \u2014 ${llmResult.error || 'unknown reason'}`);\n return item;\n },\n CONCURRENCY.LLM_EXTRACTION,\n );\n\n return { items: llmResults, llmErrors, llmAttempted: successItems.length };\n}\n\n// --- Output assembly ---\n\nfunction assembleContentEntries(successItems: ProcessedResult[], failedContents: string[]): string[] {\n const sorted = [...successItems].sort((a, b) => a.index - b.index);\n const contents = [...failedContents];\n for (const item of sorted) {\n let content = item.content;\n try {\n content = removeMetaTags(content);\n } catch {\n // Use content as-is\n }\n contents.push(`## ${item.url}\\n\\n${content}`);\n }\n return contents;\n}\n\nfunction buildScrapeResponse(\n params: ScrapeLinksParams,\n contents: string[],\n metrics: ScrapeMetrics,\n llmErrors: number,\n executionTime: number,\n llmAccounting: { llmAttempted: number; llmSucceeded: boolean },\n): { content: string; structuredContent: ScrapeLinksOutput } {\n const llmExtras: Record<string, string | number> = {};\n if (llmAccounting.llmAttempted > 0) {\n const ok = llmAccounting.llmAttempted - llmErrors;\n llmExtras['LLM extraction'] = `${ok}/${llmAccounting.llmAttempted} succeeded`;\n if (!llmAccounting.llmSucceeded) {\n llmExtras['LLM credit'] = '0 charged (no extraction produced)';\n }\n } else if (llmErrors > 0) {\n llmExtras['LLM extraction failures'] = llmErrors;\n }\n\n const batchHeader = formatBatchHeader({\n title: `Scraped Content (${params.urls.length} URLs)`,\n totalItems: params.urls.length,\n successful: metrics.successful,\n failed: metrics.failed,\n extras: {\n 'Credits used': metrics.totalCredits,\n ...llmExtras,\n },\n });\n\n const formattedContent = formatSuccess({\n title: 'Scraping Complete',\n summary: batchHeader,\n data: contents.join('\\n\\n---\\n\\n'),\n metadata: {\n 'Execution time': formatDuration(executionTime),\n },\n });\n\n const metadata: ScrapeLinksOutput['metadata'] = {\n total_items: params.urls.length,\n successful: metrics.successful,\n failed: metrics.failed,\n execution_time_ms: executionTime,\n total_credits: metrics.totalCredits,\n };\n return { content: formattedContent, structuredContent: { metadata } };\n}\n\n// --- Handler ---\n\nexport async function handleScrapeLinks(\n params: ScrapeLinksParams,\n reporter: ToolReporter = NOOP_REPORTER,\n): Promise<ToolExecutionResult<ScrapeLinksOutput>> {\n const startTime = Date.now();\n\n if (!params.urls || params.urls.length === 0) {\n return createScrapeErrorResponse('NO_URLS', 'No URLs provided', startTime);\n }\n\n const { webInputs, redditInputs, documentInputs, invalidEntries } = 
partitionUrls(params.urls);\n const validCount = webInputs.length + redditInputs.length + documentInputs.length;\n\n await reporter.log(\n 'info',\n `Partitioned ${params.urls.length} URL(s): ${webInputs.length} web, ${redditInputs.length} reddit, ${documentInputs.length} document, ${invalidEntries.length} invalid`,\n );\n\n if (validCount === 0) {\n return createScrapeErrorResponse(\n 'INVALID_URLS',\n `All ${params.urls.length} URLs are invalid`,\n startTime,\n false,\n [\n 'web-search(queries=[...], extract=\"...\") \u2014 search for valid URLs first, then scrape the results',\n ],\n );\n }\n\n mcpLog(\n 'info',\n `Starting scrape: ${webInputs.length} web + ${redditInputs.length} reddit + ${documentInputs.length} document URL(s)`,\n 'scrape',\n );\n await reporter.progress(15, 100, 'Preparing scraper clients');\n\n // Only initialize the Scrape.do client if we actually have HTML/web URLs.\n // The Jina client is cheap (no auth needed) and always constructed so the\n // document branch and the web\u2192Jina fallback path both work uniformly.\n let clients: ScrapeClients | null = null;\n try {\n const jinaClient = new JinaClient();\n if (webInputs.length > 0) {\n clients = {\n client: new ScraperClient(),\n jinaClient,\n llmProcessor: createLLMProcessor(),\n };\n } else {\n clients = {\n client: null as unknown as ScraperClient,\n jinaClient,\n llmProcessor: createLLMProcessor(),\n };\n }\n } catch (error) {\n const err = classifyError(error);\n return createScrapeErrorResponse(\n 'CLIENT_INIT_FAILED',\n `Failed to initialize scraper: ${err.message}`,\n startTime,\n false,\n [\n 'web-search(queries=[\"topic key findings\", \"topic summary\"], extract=\"key findings and summary\") \u2014 search instead of scraping',\n ],\n );\n }\n\n const enhancedInstruction = enhanceExtractionInstruction(params.extract);\n\n await reporter.progress(35, 100, 'Fetching page content');\n\n // Phase 1 \u2014 run all three branches in parallel. Failures in one branch do\n // not block the others. The web branch may surface URLs to reroute via\n // `jinaFallbacks` (binary content-type OR non-404 Scrape.do failure),\n // which Phase 2 re-runs through Jina Reader.\n const emptyPhase: WebPhaseResult = {\n successItems: [], failedContents: [],\n metrics: { successful: 0, failed: 0, totalCredits: 0 },\n jinaFallbacks: [],\n };\n const [webPhase, redditPhase, documentPhase] = await Promise.all([\n webInputs.length > 0\n ? 
fetchWebBranch(webInputs, clients.client)\n : Promise.resolve<WebPhaseResult>(emptyPhase),\n fetchRedditBranch(redditInputs),\n fetchDocumentBranch(documentInputs, clients.jinaClient),\n ]);\n\n // Phase 2 \u2014 Jina Reader as a fallback for web-branch URLs that either\n // returned binary content or failed outright on Scrape.do.\n let deferredPhase: ScrapePhaseResult = {\n successItems: [], failedContents: [],\n metrics: { successful: 0, failed: 0, totalCredits: 0 },\n };\n if (webPhase.jinaFallbacks.length > 0) {\n const binaryCount = webPhase.jinaFallbacks.filter((f) => f.reason === 'binary_content').length;\n const failedCount = webPhase.jinaFallbacks.length - binaryCount;\n await reporter.log(\n 'info',\n `Rerouting ${webPhase.jinaFallbacks.length} URL(s) to Jina Reader: ${binaryCount} binary, ${failedCount} scrape-failed`,\n );\n const fallbackInputs: BranchInput[] = webPhase.jinaFallbacks.map((f) => ({\n url: f.url,\n origIndex: f.origIndex,\n }));\n const errorContext = new Map<string, string>(\n webPhase.jinaFallbacks\n .filter((f) => f.scrapeError !== undefined)\n .map((f) => [f.url, f.scrapeError as string]),\n );\n deferredPhase = await fetchDocumentBranch(fallbackInputs, clients.jinaClient, errorContext);\n }\n\n const successItems = [\n ...webPhase.successItems,\n ...redditPhase.successItems,\n ...documentPhase.successItems,\n ...deferredPhase.successItems,\n ];\n const invalidFailed = invalidEntries.map(\n ({ url }) => `## ${url}\\n\\n\u274C Invalid URL format`,\n );\n const failedContents = [\n ...invalidFailed,\n ...webPhase.failedContents,\n ...redditPhase.failedContents,\n ...documentPhase.failedContents,\n ...deferredPhase.failedContents,\n ];\n const metrics: ScrapeMetrics = {\n successful:\n webPhase.metrics.successful\n + redditPhase.metrics.successful\n + documentPhase.metrics.successful\n + deferredPhase.metrics.successful,\n failed:\n invalidEntries.length\n + webPhase.metrics.failed\n + redditPhase.metrics.failed\n + documentPhase.metrics.failed\n + deferredPhase.metrics.failed,\n totalCredits: webPhase.metrics.totalCredits,\n };\n\n await reporter.log('info', `Fetched ${metrics.successful} page(s), ${metrics.failed} failed`);\n\n if (successItems.length > 0) {\n await reporter.progress(80, 100, 'Running LLM extraction over fetched pages');\n }\n\n const { items: processedItems, llmErrors, llmAttempted } = await processItemsWithLlm(\n successItems,\n enhancedInstruction,\n clients.llmProcessor,\n reporter,\n );\n\n const contents = assembleContentEntries(processedItems, failedContents);\n const executionTime = Date.now() - startTime;\n\n mcpLog(\n 'info',\n `Completed: ${metrics.successful} successful, ${metrics.failed} failed, ${metrics.totalCredits} credits used`,\n 'scrape',\n );\n\n const llmSucceeded = llmAttempted > 0 && llmErrors < llmAttempted;\n const result = buildScrapeResponse(\n params,\n contents,\n metrics,\n llmErrors,\n executionTime,\n { llmAttempted, llmSucceeded },\n );\n\n if (metrics.successful === 0 && metrics.failed > 0) {\n return toolFailure(result.content);\n }\n\n return toolSuccess(result.content, result.structuredContent);\n}\n\nexport function registerScrapeLinksTool(server: MCPServer): void {\n server.tool(\n {\n name: 'scrape-links',\n title: 'Scrape Links',\n description:\n 'Fetch many URLs in parallel and run per-URL structured LLM extraction. Auto-detects reddit.com post permalinks and routes them through the Reddit API (threaded post + comments); everything else flows through the HTTP scraper. 
Safe to call in parallel \u2014 group URLs by context rather than jamming unrelated batches together. Each page returns `## Source`, `## Matches` (verbatim-preserved facts), `## Not found` (explicit gaps), and `## Follow-up signals` (new terms + referenced URLs) that feed the next research loop. Describe the SHAPE of what you want in `extract`, facets separated by `|` (e.g. `root cause | affected versions | fix | workarounds | timeline`).',\n schema: scrapeLinksParamsSchema,\n outputSchema: scrapeLinksOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: true,\n },\n },\n async (args, ctx) => {\n if (!getCapabilities().scraping) {\n return toToolResponse(toolFailure(getMissingEnvMessage('scraping')));\n }\n\n const reporter = createToolReporter(ctx, 'scrape-links');\n const result = await handleScrapeLinks(args, reporter);\n\n await reporter.progress(100, 100, result.isError ? 'Scrape failed' : 'Scrape complete');\n return toToolResponse(result);\n },\n );\n}\n"],
5
+ "mappings": "AAYA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP,SAAS,qBAAqB;AAC9B,SAAS,oBAAqC;AAC9C,SAAS,kBAAkB;AAC3B,SAAS,uBAAuB;AAChC,SAAS,oBAAoB,6BAA6B;AAC1D,SAAS,sBAAsB;AAC/B,SAAS,8BAA8B;AACvC,SAAS,eAAe,iBAAiB;AACzC,SAAS,qBAAqB;AAC9B,SAAS,MAAM,mBAAmB;AAClC;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAGK;AAEP,MAAM,kBAAkB,IAAI,gBAAgB;AAE5C,SAAS,6BAA6B,aAAyC;AAC7E,QAAM,OAAO,eAAe;AAC5B,SAAO,GAAG,QAAQ,iBAAiB;AAAA;AAAA,EAAO,IAAI;AAAA;AAAA,EAAO,QAAQ,iBAAiB;AAChF;AAsDA,MAAM,cAAc;AACpB,MAAM,wBAAwB;AAE9B,SAAS,YAAY,KAAsB;AACzC,MAAI;AACF,UAAM,IAAI,IAAI,IAAI,GAAG;AACrB,WAAO,YAAY,KAAK,EAAE,QAAQ;AAAA,EACpC,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAEA,SAAS,sBAAsB,KAAsB;AACnD,MAAI;AACF,UAAM,IAAI,IAAI,IAAI,GAAG;AACrB,WAAO,YAAY,KAAK,EAAE,QAAQ,KAAK,sBAAsB,KAAK,EAAE,QAAQ;AAAA,EAC9E,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAIA,SAAS,0BACP,MACA,SACA,WACA,YAAY,OACZ,cACwC;AACxC,SAAO;AAAA,IACL,GAAG,YAAY;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU;AAAA,MACV,UAAU,SAAS,YAAY,CAAC,gCAAgC,IAAI;AAAA,MACpE;AAAA,IACF,CAAC,CAAC;AAAA;AAAA,kBAAuB,eAAe,KAAK,IAAI,IAAI,SAAS,CAAC;AAAA,EACjE;AACF;AAWA,SAAS,cAAc,MAAiC;AACtD,QAAM,YAA2B,CAAC;AAClC,QAAM,eAA8B,CAAC;AACrC,QAAM,iBAAgC,CAAC;AACvC,QAAM,iBAAuD,CAAC;AAE9D,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,MAAM,KAAK,CAAC;AAClB,QAAI;AACF,UAAI,IAAI,GAAG;AAAA,IACb,QAAQ;AACN,qBAAe,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AACzC;AAAA,IACF;AAKA,QAAI,cAAc,GAAG,GAAG;AACtB,qBAAe,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AAAA,IAC3C,WAAW,YAAY,GAAG,GAAG;AAC3B,mBAAa,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AAAA,IACzC,OAAO;AACL,gBAAU,KAAK,EAAE,KAAK,WAAW,EAAE,CAAC;AAAA,IACtC;AAAA,EACF;AAEA,SAAO,EAAE,WAAW,cAAc,gBAAgB,eAAe;AACnE;AAIA,eAAe,eACb,QACA,QACyB;AACzB,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO;AAAA,MACL,cAAc,CAAC;AAAA,MACf,gBAAgB,CAAC;AAAA,MACjB,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE;AAAA,MACrD,eAAe,CAAC;AAAA,IAClB;AAAA,EACF;AAEA,SAAO,QAAQ,yCAAyC,OAAO,MAAM,sBAAsB,YAAY,OAAO,IAAI,QAAQ;AAC1H,QAAM,OAAO,OAAO,IAAI,CAAC,MAAM,EAAE,GAAG;AACpC,QAAM,UAAU,MAAM,OAAO,eAAe,MAAM,EAAE,SAAS,GAAG,CAAC;AACjE,QAAM,aAAa,IAAI,IAAI,OAAO,IAAI,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,SAAS,CAAC,CAAC;AAElE,QAAM,eAAkC,CAAC;AACzC,QAAM,iBAA2B,CAAC;AAClC,QAAM,gBAAgC,CAAC;AACvC,MAAI,aAAa;AACjB,MAAI,SAAS;AACb,MAAI,eAAe;AAEnB,WAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,UAAM,SAAS,QAAQ,CAAC;AACxB,UAAM,YAAY,OAAO,CAAC,EAAG;AAC7B,QAAI,CAAC,QAAQ;AACX;AACA,qBAAe,KAAK,MAAM,OAAO,CAAC,EAAG,GAAG;AAAA;AAAA,0BAA0B;AAClE;AAAA,IACF;AAGA,QAAI,OAAO,OAAO,SAAS,UAAU,4BAA4B;AAC/D,oBAAc,KAAK;AAAA,QACjB,KAAK,OAAO;AAAA,QACZ,WAAW,WAAW,IAAI,OAAO,GAAG,KAAK;AAAA,QACzC,QAAQ;AAAA,MACV,CAAC;AACD;AAAA,IACF;AAOA,UAAM,eAAe,QAAQ,OAAO,KAAK,KAAK,OAAO,aAAa,OAAO,OAAO,cAAc;AAC9F,QAAI,gBAAgB,OAAO,eAAe,KAAK;AAC7C,oBAAc,KAAK;AAAA,QACjB,KAAK,OAAO;AAAA,QACZ,WAAW,WAAW,IAAI,OAAO,GAAG,KAAK;AAAA,QACzC,QAAQ;AAAA,QACR,aAAa,OAAO,OAAO,WAAW,OAAO,WAAW,QAAQ,OAAO,UAAU;AAAA,MACnF,CAAC;AACD;AAAA,IACF;AACA,QAAI,cAAc;AAChB;AACA,qBAAe,KAAK,MAAM,OAAO,GAAG;AAAA;AAAA,wDAAmD;AACvF;AAAA,IACF;AAEA;AACA,oBAAgB,OAAO;AAEvB,QAAI;AACJ,QAAI;AACF,YAAM,WAAW,uBAAuB,OAAO,SAAS,OAAO,GAAG;AAClE,YAAM,mBAAmB,SAAS,YAAY,SAAS,UAAU,OAAO;AACxE,gBAAU,gBAAgB,eAAe,gBAAgB;AAAA,IAC3D,QAAQ;AACN,gBAAU,OAAO;AAAA,IACnB;AAEA,iBAAa,KAAK,EAAE,KAAK,OAAO,KAAK,SAAS,OAAO,UAAU,CAAC;AAAA,EAClE;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,SAAS,EAAE,YAAY,QAAQ,aAAa;AAAA,IAC5C;AAAA,EACF;AACF;AAWO,SAAS,kBAAkB,KAAa,WAAmB,aAA8B;AAC9F,MAAI,aAAa;AACf,WAAO,MAAM,GAAG;AAAA;AAAA,0CAA0C,WAAW,kBAAkB,SAAS;AAAA,EAClG;AACA,SAAO,MAAM,GAAG;AAAA;AAAA,qCAAqC,SAAS;AAChE;AAEA,eAAe,oBACb,QACA,YAEA,oBAC
4B;AAC5B,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO,EAAE,cAAc,CAAC,GAAG,gBAAgB,CAAC,GAAG,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE,EAAE;AAAA,EACxG;AAEA;AAAA,IACE;AAAA,IACA,oDAAoD,OAAO,MAAM,sBAAsB,YAAY,OAAO;AAAA,IAC1G;AAAA,EACF;AAEA,QAAM,UAAU,MAAM;AAAA,IACpB;AAAA,IACA,CAAC,UAAU,WAAW,QAAQ,EAAE,KAAK,MAAM,IAAI,CAAC;AAAA,IAChD,YAAY;AAAA,EACd;AAEA,QAAM,eAAkC,CAAC;AACzC,QAAM,iBAA2B,CAAC;AAClC,MAAI,aAAa;AACjB,MAAI,SAAS;AAEb,WAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,UAAM,UAAU,QAAQ,CAAC;AACzB,UAAM,QAAQ,OAAO,CAAC;AACtB,UAAM,cAAc,oBAAoB,IAAI,MAAM,GAAG;AACrD,QAAI,CAAC,SAAS;AACZ;AACA,qBAAe,KAAK,kBAAkB,MAAM,KAAK,sBAAsB,WAAW,CAAC;AACnF;AAAA,IACF;AACA,QAAI,QAAQ,WAAW,YAAY;AACjC;AACA,YAAM,SAAS,QAAQ,kBAAkB,QAAQ,QAAQ,OAAO,UAAU,OAAO,QAAQ,MAAM;AAC/F,qBAAe,KAAK,kBAAkB,MAAM,KAAK,QAAQ,WAAW,CAAC;AACrE;AAAA,IACF;AAEA,UAAM,SAAS,QAAQ;AACvB,QAAI,OAAO,SAAS,OAAO,aAAa,OAAO,OAAO,cAAc,KAAK;AACvE;AACA,YAAM,WAAW,OAAO,OAAO,WAAW,QAAQ,OAAO,UAAU;AACnE,qBAAe,KAAK,kBAAkB,MAAM,KAAK,UAAU,WAAW,CAAC;AACvE;AAAA,IACF;AAEA;AACA,iBAAa,KAAK,EAAE,KAAK,MAAM,KAAK,SAAS,OAAO,SAAS,OAAO,MAAM,UAAU,CAAC;AAAA,EACvF;AAEA,SAAO,EAAE,cAAc,gBAAgB,SAAS,EAAE,YAAY,QAAQ,cAAc,EAAE,EAAE;AAC1F;AAIA,SAAS,2BAA2B,QAA4B;AAC9D,QAAM,EAAE,MAAM,SAAS,IAAI;AAC3B,QAAM,QAAkB,CAAC;AACzB,QAAM,KAAK,KAAK,KAAK,KAAK,EAAE;AAC5B,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,OAAO,KAAK,SAAS,eAAU,KAAK,MAAM,wBAAS,KAAK,KAAK,qBAAS,KAAK,YAAY,WAAW;AAC7G,QAAM,KAAK,aAAM,KAAK,GAAG,EAAE;AAC3B,QAAM,KAAK,EAAE;AACb,MAAI,KAAK,MAAM;AACb,UAAM,KAAK,iBAAiB;AAC5B,UAAM,KAAK,EAAE;AACb,UAAM,KAAK,KAAK,IAAI;AACpB,UAAM,KAAK,EAAE;AAAA,EACf;AACA,MAAI,SAAS,SAAS,GAAG;AACvB,UAAM,KAAK,oBAAoB,SAAS,MAAM,SAAS;AACvD,UAAM,KAAK,EAAE;AACb,eAAW,KAAK,UAAU;AACxB,YAAM,SAAS,KAAK,OAAO,EAAE,KAAK;AAClC,YAAM,KAAK,EAAE,OAAO,cAAc;AAClC,YAAM,QAAQ,EAAE,SAAS,IAAI,IAAI,EAAE,KAAK,KAAK,GAAG,EAAE,KAAK;AACvD,YAAM,KAAK,GAAG,MAAM,SAAS,EAAE,MAAM,KAAK,EAAE,MAAM,KAAK,IAAI;AAC3D,iBAAW,QAAQ,EAAE,KAAK,MAAM,IAAI,GAAG;AACrC,cAAM,KAAK,GAAG,MAAM,KAAK,IAAI,EAAE;AAAA,MACjC;AACA,YAAM,KAAK,EAAE;AAAA,IACf;AAAA,EACF;AACA,SAAO,MAAM,KAAK,IAAI;AACxB;AAEA,eAAe,kBAAkB,QAAmD;AAClF,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO,EAAE,cAAc,CAAC,GAAG,gBAAgB,CAAC,GAAG,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE,EAAE;AAAA,EACxG;AAEA,QAAM,MAAM,SAAS;AACrB,MAAI,CAAC,IAAI,oBAAoB,CAAC,IAAI,sBAAsB;AACtD,UAAMA,kBAAiB,OAAO;AAAA,MAC5B,CAAC,MAAM,MAAM,EAAE,GAAG;AAAA;AAAA;AAAA,IACpB;AACA,WAAO;AAAA,MACL,cAAc,CAAC;AAAA,MACf,gBAAAA;AAAA,MACA,SAAS,EAAE,YAAY,GAAG,QAAQ,OAAO,QAAQ,cAAc,EAAE;AAAA,IACnE;AAAA,EACF;AAKA,QAAM,CAAC,YAAY,aAAa,IAAI,OAAO;AAAA,IACzC,CAAC,CAAC,OAAO,IAAI,GAAG,UAAU;AACxB,UAAI,sBAAsB,MAAM,GAAG,EAAG,OAAM,KAAK,KAAK;AAAA,UACjD,MAAK,KAAK,KAAK;AACpB,aAAO,CAAC,OAAO,IAAI;AAAA,IACrB;AAAA,IACA,CAAC,CAAC,GAAG,CAAC,CAAC;AAAA,EACT;AAEA,QAAM,qBAAqB,cAAc;AAAA,IACvC,CAAC,MAAM,MAAM,EAAE,GAAG;AAAA;AAAA;AAAA,EACpB;AAEA,MAAI,WAAW,WAAW,GAAG;AAC3B,WAAO;AAAA,MACL,cAAc,CAAC;AAAA,MACf,gBAAgB;AAAA,MAChB,SAAS,EAAE,YAAY,GAAG,QAAQ,cAAc,QAAQ,cAAc,EAAE;AAAA,IAC1E;AAAA,EACF;AAEA,SAAO,QAAQ,yCAAyC,WAAW,MAAM,uBAAuB,YAAY,MAAM,IAAI,QAAQ;AAC9H,QAAM,SAAS,IAAI,aAAa,IAAI,kBAAkB,IAAI,oBAAoB;AAC9E,QAAM,OAAO,WAAW,IAAI,CAAC,MAAM,EAAE,GAAG;AACxC,QAAM,cAAc,MAAM,OAAO,cAAc,MAAM,IAAI;AACzD,QAAM,aAAa,IAAI,IAAI,WAAW,IAAI,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,SAAS,CAAC,CAAC;AAEtE,QAAM,eAAkC,CAAC;AACzC,QAAM,iBAA2B,CAAC,GAAG,kBAAkB;AACvD,MAAI,aAAa;AACjB,MAAI,SAAS,cAAc;AAE3B,aAAW,CAAC,KAAK,MAAM,KAAK,YAAY,SAAS;AAC/C,UAAM,YAAY,WAAW,IAAI,GAAG,KAAK;AACzC,QAAI,kBAAkB,OAAO;AAC3B;AACA,qBAAe,KAAK,MAAM,GAAG;AAAA;AAAA,8BAA8B,OAAO,OAAO,EAAE;AAC3E;AAAA,IACF;AACA;AACA,iBAAa,KAAK,EAAE,KAAK,SAAS,2BAA2B,MAAM,GAAG,OAAO,UAAU,CAAC;AAAA,EAC1F;AAEA,SAAO,EAAE,cAAc,gBAAgB,SAAS,EAAE,YAAY,QAAQ,cAAc,EAAE,EAA
E;AAC1F;AAIA,eAAe,oBACb,cACA,qBACA,cACA,UACgF;AAChF,MAAI,YAAY;AAEhB,MAAI,CAAC,gBAAgB,aAAa,WAAW,GAAG;AAC9C,QAAI,CAAC,gBAAgB,aAAa,SAAS,GAAG;AAC5C,aAAO,WAAW,yEAAyE,QAAQ;AACnG,WAAK,SAAS,IAAI,WAAW,iFAAiF;AAAA,IAChH;AACA,WAAO,EAAE,OAAO,cAAc,WAAW,cAAc,EAAE;AAAA,EAC3D;AAEA,SAAO,QAAQ,6CAA6C,aAAa,MAAM,uBAAuB,YAAY,cAAc,IAAI,QAAQ;AAE5I,QAAM,aAAa,MAAM;AAAA,IACvB;AAAA,IACA,OAAO,SAAS;AACd,aAAO,SAAS,kBAAkB,KAAK,GAAG,OAAO,QAAQ;AAEzD,YAAM,YAAY,MAAM;AAAA,QACtB,KAAK;AAAA,QACL,EAAE,SAAS,MAAM,SAAS,qBAAqB,KAAK,KAAK,IAAI;AAAA,QAC7D;AAAA,MACF;AAEA,UAAI,UAAU,WAAW;AACvB,eAAO,EAAE,GAAG,MAAM,SAAS,UAAU,QAAQ;AAAA,MAC/C;AAEA;AACA,aAAO,WAAW,6BAA6B,KAAK,GAAG,KAAK,UAAU,SAAS,gBAAgB,IAAI,QAAQ;AAC3G,WAAK,SAAS,IAAI,WAAW,8BAA8B,KAAK,GAAG,WAAM,UAAU,SAAS,gBAAgB,EAAE;AAC9G,aAAO;AAAA,IACT;AAAA,IACA,YAAY;AAAA,EACd;AAEA,SAAO,EAAE,OAAO,YAAY,WAAW,cAAc,aAAa,OAAO;AAC3E;AAIA,SAAS,uBAAuB,cAAiC,gBAAoC;AACnG,QAAM,SAAS,CAAC,GAAG,YAAY,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AACjE,QAAM,WAAW,CAAC,GAAG,cAAc;AACnC,aAAW,QAAQ,QAAQ;AACzB,QAAI,UAAU,KAAK;AACnB,QAAI;AACF,gBAAU,eAAe,OAAO;AAAA,IAClC,QAAQ;AAAA,IAER;AACA,aAAS,KAAK,MAAM,KAAK,GAAG;AAAA;AAAA,EAAO,OAAO,EAAE;AAAA,EAC9C;AACA,SAAO;AACT;AAEA,SAAS,oBACP,QACA,UACA,SACA,WACA,eACA,eAC2D;AAC3D,QAAM,YAA6C,CAAC;AACpD,MAAI,cAAc,eAAe,GAAG;AAClC,UAAM,KAAK,cAAc,eAAe;AACxC,cAAU,gBAAgB,IAAI,GAAG,EAAE,IAAI,cAAc,YAAY;AACjE,QAAI,CAAC,cAAc,cAAc;AAC/B,gBAAU,YAAY,IAAI;AAAA,IAC5B;AAAA,EACF,WAAW,YAAY,GAAG;AACxB,cAAU,yBAAyB,IAAI;AAAA,EACzC;AAEA,QAAM,cAAc,kBAAkB;AAAA,IACpC,OAAO,oBAAoB,OAAO,KAAK,MAAM;AAAA,IAC7C,YAAY,OAAO,KAAK;AAAA,IACxB,YAAY,QAAQ;AAAA,IACpB,QAAQ,QAAQ;AAAA,IAChB,QAAQ;AAAA,MACN,gBAAgB,QAAQ;AAAA,MACxB,GAAG;AAAA,IACL;AAAA,EACF,CAAC;AAED,QAAM,mBAAmB,cAAc;AAAA,IACrC,OAAO;AAAA,IACP,SAAS;AAAA,IACT,MAAM,SAAS,KAAK,aAAa;AAAA,IACjC,UAAU;AAAA,MACR,kBAAkB,eAAe,aAAa;AAAA,IAChD;AAAA,EACF,CAAC;AAED,QAAM,WAA0C;AAAA,IAC9C,aAAa,OAAO,KAAK;AAAA,IACzB,YAAY,QAAQ;AAAA,IACpB,QAAQ,QAAQ;AAAA,IAChB,mBAAmB;AAAA,IACnB,eAAe,QAAQ;AAAA,EACzB;AACA,SAAO,EAAE,SAAS,kBAAkB,mBAAmB,EAAE,SAAS,EAAE;AACtE;AAIA,eAAsB,kBACpB,QACA,WAAyB,eACwB;AACjD,QAAM,YAAY,KAAK,IAAI;AAE3B,MAAI,CAAC,OAAO,QAAQ,OAAO,KAAK,WAAW,GAAG;AAC5C,WAAO,0BAA0B,WAAW,oBAAoB,SAAS;AAAA,EAC3E;AAEA,QAAM,EAAE,WAAW,cAAc,gBAAgB,eAAe,IAAI,cAAc,OAAO,IAAI;AAC7F,QAAM,aAAa,UAAU,SAAS,aAAa,SAAS,eAAe;AAE3E,QAAM,SAAS;AAAA,IACb;AAAA,IACA,eAAe,OAAO,KAAK,MAAM,YAAY,UAAU,MAAM,SAAS,aAAa,MAAM,YAAY,eAAe,MAAM,cAAc,eAAe,MAAM;AAAA,EAC/J;AAEA,MAAI,eAAe,GAAG;AACpB,WAAO;AAAA,MACL;AAAA,MACA,OAAO,OAAO,KAAK,MAAM;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,QACE;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA;AAAA,IACE;AAAA,IACA,oBAAoB,UAAU,MAAM,UAAU,aAAa,MAAM,aAAa,eAAe,MAAM;AAAA,IACnG;AAAA,EACF;AACA,QAAM,SAAS,SAAS,IAAI,KAAK,2BAA2B;AAK5D,MAAI,UAAgC;AACpC,MAAI;AACF,UAAM,aAAa,IAAI,WAAW;AAClC,QAAI,UAAU,SAAS,GAAG;AACxB,gBAAU;AAAA,QACR,QAAQ,IAAI,cAAc;AAAA,QAC1B;AAAA,QACA,cAAc,mBAAmB;AAAA,MACnC;AAAA,IACF,OAAO;AACL,gBAAU;AAAA,QACR,QAAQ;AAAA,QACR;AAAA,QACA,cAAc,mBAAmB;AAAA,MACnC;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,UAAM,MAAM,cAAc,KAAK;AAC/B,WAAO;AAAA,MACL;AAAA,MACA,iCAAiC,IAAI,OAAO;AAAA,MAC5C;AAAA,MACA;AAAA,MACA;AAAA,QACE;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,sBAAsB,6BAA6B,OAAO,OAAO;AAEvE,QAAM,SAAS,SAAS,IAAI,KAAK,uBAAuB;AAMxD,QAAM,aAA6B;AAAA,IACjC,cAAc,CAAC;AAAA,IAAG,gBAAgB,CAAC;AAAA,IACnC,SAAS,EAAE,YAAY,GAAG,QAAQ,GAAG,cAAc,EAAE;AAAA,IACrD,eAAe,CAAC;AAAA,EAClB;AACA,QAAM,CAAC,UAAU,aAAa,aAAa,IAAI,MAAM,QAAQ,IAAI;AAAA,IAC/D,UAAU,SAAS,IACf,eAAe,WAAW,QAAQ,MAAM,IACxC,QAAQ,QAAwB,UAAU;AAAA,IAC9C,kBAAkB,YAAY;AAAA,IAC9B,oBAAoB,gBAAgB,QAAQ,UAAU;AAAA,EACxD,CAAC;AAID,MAAI,gBAAmC;AAAA,IACrC,cAAc,CAAC;AAAA,IAAG,gBAAgB,CAAC;AAAA,IACnC,SAAS,EAAE,YAAY,GAAG,QAAQ
,GAAG,cAAc,EAAE;AAAA,EACvD;AACA,MAAI,SAAS,cAAc,SAAS,GAAG;AACrC,UAAM,cAAc,SAAS,cAAc,OAAO,CAAC,MAAM,EAAE,WAAW,gBAAgB,EAAE;AACxF,UAAM,cAAc,SAAS,cAAc,SAAS;AACpD,UAAM,SAAS;AAAA,MACb;AAAA,MACA,aAAa,SAAS,cAAc,MAAM,2BAA2B,WAAW,YAAY,WAAW;AAAA,IACzG;AACA,UAAM,iBAAgC,SAAS,cAAc,IAAI,CAAC,OAAO;AAAA,MACvE,KAAK,EAAE;AAAA,MACP,WAAW,EAAE;AAAA,IACf,EAAE;AACF,UAAM,eAAe,IAAI;AAAA,MACvB,SAAS,cACN,OAAO,CAAC,MAAM,EAAE,gBAAgB,MAAS,EACzC,IAAI,CAAC,MAAM,CAAC,EAAE,KAAK,EAAE,WAAqB,CAAC;AAAA,IAChD;AACA,oBAAgB,MAAM,oBAAoB,gBAAgB,QAAQ,YAAY,YAAY;AAAA,EAC5F;AAEA,QAAM,eAAe;AAAA,IACnB,GAAG,SAAS;AAAA,IACZ,GAAG,YAAY;AAAA,IACf,GAAG,cAAc;AAAA,IACjB,GAAG,cAAc;AAAA,EACnB;AACA,QAAM,gBAAgB,eAAe;AAAA,IACnC,CAAC,EAAE,IAAI,MAAM,MAAM,GAAG;AAAA;AAAA;AAAA,EACxB;AACA,QAAM,iBAAiB;AAAA,IACrB,GAAG;AAAA,IACH,GAAG,SAAS;AAAA,IACZ,GAAG,YAAY;AAAA,IACf,GAAG,cAAc;AAAA,IACjB,GAAG,cAAc;AAAA,EACnB;AACA,QAAM,UAAyB;AAAA,IAC7B,YACE,SAAS,QAAQ,aACf,YAAY,QAAQ,aACpB,cAAc,QAAQ,aACtB,cAAc,QAAQ;AAAA,IAC1B,QACE,eAAe,SACb,SAAS,QAAQ,SACjB,YAAY,QAAQ,SACpB,cAAc,QAAQ,SACtB,cAAc,QAAQ;AAAA,IAC1B,cAAc,SAAS,QAAQ;AAAA,EACjC;AAEA,QAAM,SAAS,IAAI,QAAQ,WAAW,QAAQ,UAAU,aAAa,QAAQ,MAAM,SAAS;AAE5F,MAAI,aAAa,SAAS,GAAG;AAC3B,UAAM,SAAS,SAAS,IAAI,KAAK,2CAA2C;AAAA,EAC9E;AAEA,QAAM,EAAE,OAAO,gBAAgB,WAAW,aAAa,IAAI,MAAM;AAAA,IAC/D;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,EACF;AAEA,QAAM,WAAW,uBAAuB,gBAAgB,cAAc;AACtE,QAAM,gBAAgB,KAAK,IAAI,IAAI;AAEnC;AAAA,IACE;AAAA,IACA,cAAc,QAAQ,UAAU,gBAAgB,QAAQ,MAAM,YAAY,QAAQ,YAAY;AAAA,IAC9F;AAAA,EACF;AAEA,QAAM,eAAe,eAAe,KAAK,YAAY;AACrD,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,EAAE,cAAc,aAAa;AAAA,EAC/B;AAEA,MAAI,QAAQ,eAAe,KAAK,QAAQ,SAAS,GAAG;AAClD,WAAO,YAAY,OAAO,OAAO;AAAA,EACnC;AAEA,SAAO,YAAY,OAAO,SAAS,OAAO,iBAAiB;AAC7D;AAEO,SAAS,wBAAwB,QAAyB;AAC/D,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,IACF;AAAA,IACA,OAAO,MAAM,QAAQ;AACnB,UAAI,CAAC,gBAAgB,EAAE,UAAU;AAC/B,eAAO,eAAe,YAAY,qBAAqB,UAAU,CAAC,CAAC;AAAA,MACrE;AAEA,YAAM,WAAW,mBAAmB,KAAK,cAAc;AACvD,YAAM,SAAS,MAAM,kBAAkB,MAAM,QAAQ;AAErD,YAAM,SAAS,SAAS,KAAK,KAAK,OAAO,UAAU,kBAAkB,iBAAiB;AACtF,aAAO,eAAe,MAAM;AAAA,IAC9B;AAAA,EACF;AACF;",
6
6
  "names": ["failedContents"]
7
7
  }
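For readers who don't want to unpick the escaped sourcesContent above: the behavioural change in this release is how the web branch routes each Scrape.do result before Jina Reader is tried. The sketch below is illustrative only — ScrapeResultLike and routeScrapeResult are invented names, and the shipped logic is the inline branching inside fetchWebBranch in dist/index.js — but it mirrors the decisions visible in the bundle.

// Sketch only (invented names): how 6.0.7 routes each Scrape.do result.
type ScrapeRoute =
  | { kind: 'success' }
  | { kind: 'fail_404' }
  | { kind: 'jina_fallback'; reason: 'binary_content' | 'scrape_failed'; scrapeError?: string };

interface ScrapeResultLike {
  statusCode: number;
  error?: { code?: string; message?: string };
  content?: string;
}

function routeScrapeResult(result: ScrapeResultLike): ScrapeRoute {
  // Binary payloads (PDFs, images, ...) keep going to Jina Reader, as before.
  if (result.error?.code === 'UNSUPPORTED_BINARY_CONTENT') {
    return { kind: 'jina_fallback', reason: 'binary_content' };
  }
  const failed = Boolean(result.error) || result.statusCode < 200 || result.statusCode >= 300;
  // New in 6.0.7: any non-404 failure is also rerouted to Jina Reader,
  // carrying the original Scrape.do error so both layers can be reported later.
  if (failed && result.statusCode !== 404) {
    return {
      kind: 'jina_fallback',
      reason: 'scrape_failed',
      scrapeError: result.error?.message || result.content || `HTTP ${result.statusCode}`,
    };
  }
  // 404s stay terminal: the page does not exist, so a second scraper won't help.
  if (failed) {
    return { kind: 'fail_404' };
  }
  return { kind: 'success' };
}

Note that the merged metrics still take totalCredits from the web phase only; the Jina, Reddit, and fallback phases report 0 credits.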
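The failure messages those rerouted URLs can end up with are built by formatJinaFailure, which the bundle exports for unit testing. It is restated here verbatim from the sourcesContent above, with two example calls (the example.com URLs are placeholders):

function formatJinaFailure(url: string, jinaError: string, scrapeError?: string): string {
  if (scrapeError) {
    return `## ${url}\n\n❌ Both scrapers failed. Scrape.do: ${scrapeError}. Jina Reader: ${jinaError}.`;
  }
  return `## ${url}\n\n❌ Document conversion failed: ${jinaError}`;
}

console.log(formatJinaFailure('https://example.com/report.pdf', 'HTTP 502'));
// ## https://example.com/report.pdf
//
// ❌ Document conversion failed: HTTP 502

console.log(formatJinaFailure('https://example.com/page', 'timeout', 'HTTP 500'));
// ## https://example.com/page
//
// ❌ Both scrapers failed. Scrape.do: HTTP 500. Jina Reader: timeout.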
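The Reddit branch in the same bundle accepts only post permalinks and rejects subreddit listings and profiles up front with a guidance message. isRedditPostPermalink's implementation is not part of this diff, so the check below is an assumption reconstructed from that rejection message (/r/<sub>/comments/<id>/...); the splitting itself mirrors the reduce in fetchRedditBranch.

interface BranchInput { url: string; origIndex: number; }

// Assumed permalink check — the real isRedditPostPermalink may differ.
function looksLikeRedditPostPermalink(url: string): boolean {
  try {
    const { hostname, pathname } = new URL(url);
    return /(^|\.)reddit\.com$/i.test(hostname) && /^\/r\/[^/]+\/comments\/[^/]+/.test(pathname);
  } catch {
    return false;
  }
}

// Split inputs into API-fetchable posts and entries that get a guidance message instead.
function splitRedditInputs(inputs: BranchInput[]): { posts: BranchInput[]; rejected: BranchInput[] } {
  const posts: BranchInput[] = [];
  const rejected: BranchInput[] = [];
  for (const input of inputs) {
    (looksLikeRedditPostPermalink(input.url) ? posts : rejected).push(input);
  }
  return { posts, rejected };
}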
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mcp-researchpowerpack",
3
- "version": "6.0.6",
3
+ "version": "6.0.7",
4
4
  "description": "HTTP-first MCP research server: start-research (goal-tailored brief), web-search (with Reddit scope), scrape-links (auto-detects Reddit URLs) — built on mcp-use.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",