mcp-researchpowerpack 6.0.13 → 6.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,7 +14,7 @@ Built on [mcp-use](https://github.com/nicepkg/mcp-use). No stdio, HTTP only.
  | `web-search` | parallel Google search, up to 50 queries per call, parallel-callable across turns. `scope: "web" \| "reddit" \| "both"` — reddit mode filters to post permalinks. Returns tiered markdown (HIGHLY_RELEVANT / MAYBE_RELEVANT / OTHER) + grounded synthesis + gaps + refine suggestions. | `SERPER_API_KEY` |
  | `scrape-links` | fetch URLs in parallel with per-URL LLM extraction. Auto-detects `reddit.com/r/.../comments/` permalinks and routes them through the Reddit API (threaded post + comments); PDF / DOCX / PPTX / XLSX URLs route through Jina Reader; non-reddit, non-document web URLs flow through Scrape.do. Parallel-callable. | `SCRAPEDO_API_KEY` for web URLs (+ `REDDIT_CLIENT_ID` / `REDDIT_CLIENT_SECRET` for reddit URLs; optional `JINA_API_KEY` for higher document limits) |
 
- Also exposes `/health`, `health://status`, and two optional MCP prompts: `deep-research` and `reddit-sentiment`.
+ Also exposes `/health` and `health://status`.
 
  ## workflow
 
@@ -130,9 +130,8 @@ index.ts server startup, cors, health, shutdown
  src/
  config/ env parsing, capability detection, lazy proxy config
  clients/ provider API clients (serper, reddit, scrapedo, jina)
- prompts/ optional MCP prompts for deep-research and reddit-sentiment
  tools/
- registry.ts registerAllTools() — wires 3 tools + 2 prompts
+ registry.ts registerAllTools() — wires 3 tools
  start-research.ts goal-tailored brief + static playbook
  search.ts web-search handler (with CTR-weighted URL aggregation + LLM classification)
  scrape.ts scrape-links handler (reddit + web + document branches in parallel)
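
The two tool rows in the README table above are dense, so here is a minimal sketch of what calls against this surface look like. The field names (`queries`, `extract`, `scope`, `urls`) come from the zod schemas later in this diff; the concrete queries and URLs are invented for illustration, and the client wiring is assumed rather than part of the package.

```ts
// Hypothetical argument payloads for the two tools the README table documents.
// Only the field names are taken from the package's schemas; the values and
// the notion of a callTool-style client are illustrative.
const webSearchArgs = {
  queries: [
    'mcp typescript oauth 2.1 site:github.com',  // official/implementation facet
    'mcp oauth 2.1 migration experience',        // lived-experience facet
  ],
  extract: 'OAuth 2.1 support in TypeScript MCP frameworks, runnable code not marketing',
  scope: 'both' as const, // open web + site:reddit.com runs, merged and tagged
};

const scrapeLinksArgs = {
  urls: [
    'https://www.reddit.com/r/mcp/comments/abc123/example/', // -> Reddit API branch
    'https://example.com/whitepaper.pdf',                    // -> Jina Reader branch
    'https://example.com/blog/oauth-setup',                  // -> Scrape.do branch
  ],
  extract: 'Concrete OAuth 2.1 setup steps, failure modes, and workarounds',
};

console.log(webSearchArgs, scrapeLinksArgs);
```
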
package/dist/mcp-use.json CHANGED
@@ -1,7 +1,7 @@
  {
  "includeInspector": false,
- "buildTime": "2026-04-30T14:44:35.838Z",
- "buildId": "1314b8dd2700c1d7",
+ "buildTime": "2026-04-30T16:01:54.172Z",
+ "buildId": "c60363f4704d5b35",
  "entryPoint": "dist/index.js",
  "widgets": {}
  }
@@ -7,9 +7,7 @@ const startResearchParamsSchema = z.object({
  "Include the full 3-tool research playbook (toolbelt overview, the loop, output discipline). Default false \u2014 when the LLM planner is offline the server emits a compact stub that already names the 3 tools and the loop. Pass true only if the agent needs the verbose tactic reference, or to override the degraded-mode shrink."
  )
  }).strict();
- const startResearchOutputSchema = z.object({}).strict();
  export {
- startResearchOutputSchema,
  startResearchParamsSchema
  };
  //# sourceMappingURL=start-research.js.map
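
The deletion above matches the comment carried in the rewritten source below: `start-research` is now a text-only tool with no output schema and no `structuredContent`. A sketch of what a successful result plausibly looks like on the wire after this change, assuming the standard MCP tool-result shape (the server's actual response helper lives in `mcp-helpers.js` and is not part of this diff):

```ts
// Assumed wire shape of a successful start-research call after this change:
// the brief is markdown in content[0].text, and structuredContent is omitted
// entirely (the registration also omits outputSchema, per the diff comments).
interface TextOnlyToolResult {
  content: Array<{ type: 'text'; text: string }>;
  isError?: boolean;
  // no structuredContent field at all
}

const example: TextOnlyToolResult = {
  content: [{ type: 'text', text: '# Research brief\n\n...goal-tailored plan...' }],
};

console.log(example.content[0].text);
```
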
@@ -1,7 +1,7 @@
  {
  "version": 3,
  "sources": ["../../../src/schemas/start-research.ts"],
- "sourcesContent": ["import { z } from 'zod';\n\nexport const startResearchParamsSchema = z.object({\n goal: z\n .string()\n .min(1, { message: 'start-research: goal cannot be empty' })\n .optional()\n .describe(\n 'Research goal for this session. When provided AND the LLM planner (LLM_API_KEY) is available, the server returns a goal-tailored brief: classified goal type (spec | bug | migration | sentiment | pricing | security | synthesis | product_launch), a `primary_branch` recommendation (reddit for sentiment/migration; web for spec/bug/pricing; both when opinion-heavy AND needs official sources), the exact `first_call_sequence` of web-search + scrape-links calls to fire, 25\u201350 keyword seeds for the first `web-search` call, iteration hints, gaps to watch, and stop criteria. No goal \u2192 the generic 3-tool playbook (no tailored brief). Write the goal as you would to a human researcher \u2014 one or two sentences, specific about what \"done\" looks like.',\n ),\n include_playbook: z\n .boolean()\n .default(false)\n .describe(\n 'Include the full 3-tool research playbook (toolbelt overview, the loop, output discipline). Default false \u2014 when the LLM planner is offline the server emits a compact stub that already names the 3 tools and the loop. Pass true only if the agent needs the verbose tactic reference, or to override the degraded-mode shrink.',\n ),\n}).strict();\n\n// No output schema: `start-research` returns pure markdown via `content[0].text`.\n// There is nothing structured worth exposing (no per-row data, no metadata).\n// Clients read the primary text output; `structuredContent` is omitted entirely.\nexport const startResearchOutputSchema = z.object({}).strict();\n\nexport type StartResearchParams = z.infer<typeof startResearchParamsSchema>;\nexport type StartResearchOutput = z.infer<typeof startResearchOutputSchema>;\n"],
- "mappings": "AAAA,SAAS,SAAS;AAEX,MAAM,4BAA4B,EAAE,OAAO;AAAA,EAChD,MAAM,EACH,OAAO,EACP,IAAI,GAAG,EAAE,SAAS,uCAAuC,CAAC,EAC1D,SAAS,EACT;AAAA,IACC;AAAA,EACF;AAAA,EACF,kBAAkB,EACf,QAAQ,EACR,QAAQ,KAAK,EACb;AAAA,IACC;AAAA,EACF;AACJ,CAAC,EAAE,OAAO;AAKH,MAAM,4BAA4B,EAAE,OAAO,CAAC,CAAC,EAAE,OAAO;",
+ "sourcesContent": ["import { z } from 'zod';\n\nexport const startResearchParamsSchema = z.object({\n goal: z\n .string()\n .min(1, { message: 'start-research: goal cannot be empty' })\n .optional()\n .describe(\n 'Research goal for this session. When provided AND the LLM planner (LLM_API_KEY) is available, the server returns a goal-tailored brief: classified goal type (spec | bug | migration | sentiment | pricing | security | synthesis | product_launch), a `primary_branch` recommendation (reddit for sentiment/migration; web for spec/bug/pricing; both when opinion-heavy AND needs official sources), the exact `first_call_sequence` of web-search + scrape-links calls to fire, 25\u201350 keyword seeds for the first `web-search` call, iteration hints, gaps to watch, and stop criteria. No goal \u2192 the generic 3-tool playbook (no tailored brief). Write the goal as you would to a human researcher \u2014 one or two sentences, specific about what \"done\" looks like.',\n ),\n include_playbook: z\n .boolean()\n .default(false)\n .describe(\n 'Include the full 3-tool research playbook (toolbelt overview, the loop, output discipline). Default false \u2014 when the LLM planner is offline the server emits a compact stub that already names the 3 tools and the loop. Pass true only if the agent needs the verbose tactic reference, or to override the degraded-mode shrink.',\n ),\n}).strict();\n\nexport type StartResearchParams = z.infer<typeof startResearchParamsSchema>;\n\n// `start-research` is text-only: the tool registration deliberately omits\n// `outputSchema`, and successful calls omit `structuredContent`.\nexport type StartResearchOutput = Record<string, never>;\n"],
+ "mappings": "AAAA,SAAS,SAAS;AAEX,MAAM,4BAA4B,EAAE,OAAO;AAAA,EAChD,MAAM,EACH,OAAO,EACP,IAAI,GAAG,EAAE,SAAS,uCAAuC,CAAC,EAC1D,SAAS,EACT;AAAA,IACC;AAAA,EACF;AAAA,EACF,kBAAkB,EACf,QAAQ,EACR,QAAQ,KAAK,EACb;AAAA,IACC;AAAA,EACF;AACJ,CAAC,EAAE,OAAO;",
  "names": []
  }
@@ -17,10 +17,9 @@ const webSearchParamsSchema = z.object({
  )
  }).strict();
  const webSearchOutputSchema = z.object({
- // `content` deliberately NOT duplicated here — the primary markdown lives in
- // the MCP tool result's `content[0].text`. Previously this schema echoed the
- // whole markdown under `structuredContent.content`, doubling token cost for
- // clients that forward both fields to an LLM.
+ content: z.string().describe(
+ "Rendered search report, including ranked URLs, classification synthesis, gaps, and follow-up searches. Duplicates the MCP content text for clients that only expose structuredContent."
+ ),
  results: z.array(z.object({
  rank: z.number().int().positive().describe("1-based rank in the merged ranking."),
  url: z.string().describe("Result URL."),
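
This hunk reverses the 6.0.13 decision recorded in the deleted comment: the markdown that was deliberately kept out of `structuredContent` (to avoid paying for it twice) is re-added under `structuredContent.content`, on the grounds that some clients surface only `structuredContent` to the model. A sketch of the trade-off from such a client's point of view; the type below is a trimmed, assumed view of the schema's inferred output, not package code:

```ts
// Trimmed, assumed view of webSearchOutputSchema's inferred type. A client
// that never forwards content[0].text still gets the full report via
// `content`; a client that forwards both fields now pays the token cost twice.
type WebSearchStructured = {
  content: string; // full rendered report, same text as content[0].text
  results?: Array<{ rank: number; url: string; title: string }>;
  metadata: Record<string, unknown>;
};

function reportFor(structured: WebSearchStructured): string {
  return structured.content; // structuredContent-only clients read it here
}
```
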
@@ -1,7 +1,7 @@
  {
  "version": 3,
  "sources": ["../../../src/schemas/web-search.ts"],
- "sourcesContent": ["import { z } from 'zod';\n\nexport const webSearchParamsSchema = z.object({\n queries: z\n .array(\n z.string()\n .min(1, { message: 'web-search: Query cannot be empty' })\n .describe('A single Google search query. Each query runs as a separate parallel search. Use operators (site:, quotes, verbatim version numbers) to sharpen retrieval.'),\n )\n .min(1, { message: 'web-search: At least 1 query required' })\n .describe(\n 'Search queries to run in parallel via Google. Think of these as **concept groups** \u2014 clusters of semantically distinct facets of your research goal, each probing a DIFFERENT angle (official spec, implementation, failures, comparison, sentiment, changelog, CVE, pricing). Fire all groups in ONE call as a flat array. Overlapping queries waste budget; orthogonal facets multiply coverage. A narrow bug needs 10\u201320 queries across 2\u20133 facets; a comparison needs 25\u201335 across 4\u20136 facets; open-ended synthesis needs 40\u201380 across 8+ facets.',\n ),\n extract: z\n .string()\n .min(1, { message: 'web-search: extract cannot be empty' })\n .describe(\n 'Semantic instruction for the relevance classifier \u2014 what \"relevant\" means for THIS goal. Drives tiering (HIGHLY_RELEVANT / MAYBE_RELEVANT / OTHER), synthesis, gap analysis, and refine-query suggestions. Be specific: \"OAuth 2.1 support in TypeScript MCP frameworks \u2014 runnable code, not marketing\", not \"MCP OAuth\". The classifier uses this to choose a source-of-truth rubric (vendor_doc for spec, github for bugs, reddit/blog for migration/sentiment, cve_databases for security).',\n ),\n raw: z\n .boolean()\n .default(false)\n .describe('Skip LLM classification and return the raw ranked URL list. Use when you need unprocessed results.'),\n scope: z\n .enum(['web', 'reddit', 'both'])\n .default('web')\n .describe(\n 'Search scope. \"web\" (default) = open web, no augmentation. \"reddit\" = server appends `site:reddit.com` to every query and filters results to post permalinks (`/r/.+/comments/[a-z0-9]+/`); subreddit homepages are dropped. \"both\" = runs every query twice (open web + reddit-scoped), merges the result set, and tags each row with its source. Use \"reddit\" for sentiment/migration/lived-experience research; use \"both\" when you want one call to cover both branches.',\n ),\n verbose: z\n .boolean()\n .default(false)\n .describe(\n 'Include the per-row scoring/coverage metadata, the trailing Signals block, and the CONSENSUS labels even when they carry little signal (single-query hits, threshold of 1). Default false \u2014 most agents do not need this and it costs ~1.5KB per call on a typical 3-query fan-out.',\n ),\n}).strict();\n\nexport type WebSearchParams = z.infer<typeof webSearchParamsSchema>;\n\nexport const webSearchOutputSchema = z.object({\n // `content` deliberately NOT duplicated here \u2014 the primary markdown lives in\n // the MCP tool result's `content[0].text`. 
Previously this schema echoed the\n // whole markdown under `structuredContent.content`, doubling token cost for\n // clients that forward both fields to an LLM.\n results: z\n .array(z.object({\n rank: z.number().int().positive().describe('1-based rank in the merged ranking.'),\n url: z.string().describe('Result URL.'),\n title: z.string().describe('Page title from the result.'),\n snippet: z.string().describe('Search snippet from the result.'),\n source_type: z\n .enum(['reddit', 'github', 'docs', 'blog', 'paper', 'qa', 'cve', 'news', 'video', 'web'])\n .describe(\n 'Heuristic source kind from the URL. When the LLM classifier is online its tag overrides this.',\n ),\n score: z.number().describe('Composite CTR-weighted score, normalized to 100.'),\n seen_in: z.number().int().nonnegative().describe('Number of input queries this URL appeared in.'),\n best_position: z.number().int().nonnegative().describe('Best (lowest) SERP position observed.'),\n }))\n .optional()\n .describe('Per-result structured payload \u2014 same data the markdown table renders, machine-readable.'),\n metadata: z.object({\n total_items: z.number().int().nonnegative().describe('Number of queries executed.'),\n successful: z.number().int().nonnegative().describe('Queries that returned results.'),\n failed: z.number().int().nonnegative().describe('Queries that failed.'),\n execution_time_ms: z.number().int().nonnegative().describe('Wall clock time in milliseconds.'),\n llm_classified: z.boolean().describe('Whether LLM classification was applied.'),\n llm_error: z.string().optional().describe('LLM error if classification failed and fell back to raw.'),\n scope: z.enum(['web', 'reddit', 'both']).optional().describe('Search scope used.'),\n coverage_summary: z\n .array(z.object({\n query: z.string().describe('The search query.'),\n result_count: z.number().int().nonnegative().describe('Results returned for this query.'),\n top_url: z.string().optional().describe('Domain of the top result.'),\n }))\n .optional()\n .describe('Per-query result counts and top URLs.'),\n low_yield_queries: z\n .array(z.string())\n .optional()\n .describe('Queries that produced 0-1 results.'),\n query_rewrites: z\n .array(z.object({\n original: z.string().describe('The query as the agent submitted it.'),\n rewritten: z.string().describe('The query as dispatched to Google after Phase A normalization.'),\n rules: z.array(z.string()).describe('Rule ids applied (A1=operator-char de-quote, A2=path/URL de-quote, A3=phrase-AND collapse).'),\n }))\n .optional()\n .describe('Pre-dispatch query rewrites \u2014 Phase A normalizations (operator-char and path/URL de-quote, phrase-AND \u2192 anchor + OR collapse).'),\n retried_queries: z\n .array(z.object({\n original: z.string().describe('The query as dispatched (post-Phase-A) that returned 0 results.'),\n retried_with: z.string().describe('The relaxed form retried after the empty initial response.'),\n rules: z.array(z.string()).describe('Rule ids applied (B1=strip all quotes, B2=drop site: filter).'),\n recovered_results: z.number().int().nonnegative().describe('How many hits the retry produced; 0 means the retry also failed.'),\n }))\n .optional()\n .describe('On-empty retries \u2014 Phase B relaxations applied after the initial Serper batch returned 0 results for a query.'),\n retry_error: z\n .object({\n phase: z.literal('relax-retry').describe('Retry phase that failed after the initial batch succeeded.'),\n code: z.string().describe('Structured error code from the retry batch.'),\n message: 
z.string().describe('Provider error message from the retry batch.'),\n retryable: z.boolean().describe('Whether the retry-batch provider failure is retryable.'),\n statusCode: z.number().int().optional().describe('Provider status code when available.'),\n })\n .optional()\n .describe('Non-fatal failure from the relaxed retry batch; initial search results were preserved.'),\n }).strict(),\n}).strict();\n\nexport type WebSearchOutput = z.infer<typeof webSearchOutputSchema>;\n"],
- "mappings": "AAAA,SAAS,SAAS;AAEX,MAAM,wBAAwB,EAAE,OAAO;AAAA,EAC5C,SAAS,EACN;AAAA,IACC,EAAE,OAAO,EACN,IAAI,GAAG,EAAE,SAAS,oCAAoC,CAAC,EACvD,SAAS,4JAA4J;AAAA,EAC1K,EACC,IAAI,GAAG,EAAE,SAAS,wCAAwC,CAAC,EAC3D;AAAA,IACC;AAAA,EACF;AAAA,EACF,SAAS,EACN,OAAO,EACP,IAAI,GAAG,EAAE,SAAS,sCAAsC,CAAC,EACzD;AAAA,IACC;AAAA,EACF;AAAA,EACF,KAAK,EACF,QAAQ,EACR,QAAQ,KAAK,EACb,SAAS,oGAAoG;AAAA,EAChH,OAAO,EACJ,KAAK,CAAC,OAAO,UAAU,MAAM,CAAC,EAC9B,QAAQ,KAAK,EACb;AAAA,IACC;AAAA,EACF;AAAA,EACF,SAAS,EACN,QAAQ,EACR,QAAQ,KAAK,EACb;AAAA,IACC;AAAA,EACF;AACJ,CAAC,EAAE,OAAO;AAIH,MAAM,wBAAwB,EAAE,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA,EAK5C,SAAS,EACN,MAAM,EAAE,OAAO;AAAA,IACd,MAAM,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,SAAS,qCAAqC;AAAA,IAChF,KAAK,EAAE,OAAO,EAAE,SAAS,aAAa;AAAA,IACtC,OAAO,EAAE,OAAO,EAAE,SAAS,6BAA6B;AAAA,IACxD,SAAS,EAAE,OAAO,EAAE,SAAS,iCAAiC;AAAA,IAC9D,aAAa,EACV,KAAK,CAAC,UAAU,UAAU,QAAQ,QAAQ,SAAS,MAAM,OAAO,QAAQ,SAAS,KAAK,CAAC,EACvF;AAAA,MACC;AAAA,IACF;AAAA,IACF,OAAO,EAAE,OAAO,EAAE,SAAS,kDAAkD;AAAA,IAC7E,SAAS,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,+CAA+C;AAAA,IAChG,eAAe,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,uCAAuC;AAAA,EAChG,CAAC,CAAC,EACD,SAAS,EACT,SAAS,8FAAyF;AAAA,EACrG,UAAU,EAAE,OAAO;AAAA,IACjB,aAAa,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,6BAA6B;AAAA,IAClF,YAAY,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,gCAAgC;AAAA,IACpF,QAAQ,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,sBAAsB;AAAA,IACtE,mBAAmB,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,kCAAkC;AAAA,IAC7F,gBAAgB,EAAE,QAAQ,EAAE,SAAS,yCAAyC;AAAA,IAC9E,WAAW,EAAE,OAAO,EAAE,SAAS,EAAE,SAAS,0DAA0D;AAAA,IACpG,OAAO,EAAE,KAAK,CAAC,OAAO,UAAU,MAAM,CAAC,EAAE,SAAS,EAAE,SAAS,oBAAoB;AAAA,IACjF,kBAAkB,EACf,MAAM,EAAE,OAAO;AAAA,MACd,OAAO,EAAE,OAAO,EAAE,SAAS,mBAAmB;AAAA,MAC9C,cAAc,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,kCAAkC;AAAA,MACxF,SAAS,EAAE,OAAO,EAAE,SAAS,EAAE,SAAS,2BAA2B;AAAA,IACrE,CAAC,CAAC,EACD,SAAS,EACT,SAAS,uCAAuC;AAAA,IACnD,mBAAmB,EAChB,MAAM,EAAE,OAAO,CAAC,EAChB,SAAS,EACT,SAAS,oCAAoC;AAAA,IAChD,gBAAgB,EACb,MAAM,EAAE,OAAO;AAAA,MACd,UAAU,EAAE,OAAO,EAAE,SAAS,sCAAsC;AAAA,MACpE,WAAW,EAAE,OAAO,EAAE,SAAS,gEAAgE;AAAA,MAC/F,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,SAAS,6FAA6F;AAAA,IACnI,CAAC,CAAC,EACD,SAAS,EACT,SAAS,0IAAgI;AAAA,IAC5I,iBAAiB,EACd,MAAM,EAAE,OAAO;AAAA,MACd,UAAU,EAAE,OAAO,EAAE,SAAS,iEAAiE;AAAA,MAC/F,cAAc,EAAE,OAAO,EAAE,SAAS,4DAA4D;AAAA,MAC9F,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,SAAS,+DAA+D;AAAA,MACnG,mBAAmB,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,kEAAkE;AAAA,IAC/H,CAAC,CAAC,EACD,SAAS,EACT,SAAS,oHAA+G;AAAA,IAC3H,aAAa,EACV,OAAO;AAAA,MACN,OAAO,EAAE,QAAQ,aAAa,EAAE,SAAS,4DAA4D;AAAA,MACrG,MAAM,EAAE,OAAO,EAAE,SAAS,6CAA6C;AAAA,MACvE,SAAS,EAAE,OAAO,EAAE,SAAS,8CAA8C;AAAA,MAC3E,WAAW,EAAE,QAAQ,EAAE,SAAS,wDAAwD;AAAA,MACxF,YAAY,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,SAAS,sCAAsC;AAAA,IACzF,CAAC,EACA,SAAS,EACT,SAAS,wFAAwF;AAAA,EACtG,CAAC,EAAE,OAAO;AACZ,CAAC,EAAE,OAAO;",
+ "sourcesContent": ["import { z } from 'zod';\n\nexport const webSearchParamsSchema = z.object({\n queries: z\n .array(\n z.string()\n .min(1, { message: 'web-search: Query cannot be empty' })\n .describe('A single Google search query. Each query runs as a separate parallel search. Use operators (site:, quotes, verbatim version numbers) to sharpen retrieval.'),\n )\n .min(1, { message: 'web-search: At least 1 query required' })\n .describe(\n 'Search queries to run in parallel via Google. Think of these as **concept groups** \u2014 clusters of semantically distinct facets of your research goal, each probing a DIFFERENT angle (official spec, implementation, failures, comparison, sentiment, changelog, CVE, pricing). Fire all groups in ONE call as a flat array. Overlapping queries waste budget; orthogonal facets multiply coverage. A narrow bug needs 10\u201320 queries across 2\u20133 facets; a comparison needs 25\u201335 across 4\u20136 facets; open-ended synthesis needs 40\u201380 across 8+ facets.',\n ),\n extract: z\n .string()\n .min(1, { message: 'web-search: extract cannot be empty' })\n .describe(\n 'Semantic instruction for the relevance classifier \u2014 what \"relevant\" means for THIS goal. Drives tiering (HIGHLY_RELEVANT / MAYBE_RELEVANT / OTHER), synthesis, gap analysis, and refine-query suggestions. Be specific: \"OAuth 2.1 support in TypeScript MCP frameworks \u2014 runnable code, not marketing\", not \"MCP OAuth\". The classifier uses this to choose a source-of-truth rubric (vendor_doc for spec, github for bugs, reddit/blog for migration/sentiment, cve_databases for security).',\n ),\n raw: z\n .boolean()\n .default(false)\n .describe('Skip LLM classification and return the raw ranked URL list. Use when you need unprocessed results.'),\n scope: z\n .enum(['web', 'reddit', 'both'])\n .default('web')\n .describe(\n 'Search scope. \"web\" (default) = open web, no augmentation. \"reddit\" = server appends `site:reddit.com` to every query and filters results to post permalinks (`/r/.+/comments/[a-z0-9]+/`); subreddit homepages are dropped. \"both\" = runs every query twice (open web + reddit-scoped), merges the result set, and tags each row with its source. Use \"reddit\" for sentiment/migration/lived-experience research; use \"both\" when you want one call to cover both branches.',\n ),\n verbose: z\n .boolean()\n .default(false)\n .describe(\n 'Include the per-row scoring/coverage metadata, the trailing Signals block, and the CONSENSUS labels even when they carry little signal (single-query hits, threshold of 1). Default false \u2014 most agents do not need this and it costs ~1.5KB per call on a typical 3-query fan-out.',\n ),\n}).strict();\n\nexport type WebSearchParams = z.infer<typeof webSearchParamsSchema>;\n\nexport const webSearchOutputSchema = z.object({\n content: z\n .string()\n .describe(\n 'Rendered search report, including ranked URLs, classification synthesis, gaps, and follow-up searches. Duplicates the MCP content text for clients that only expose structuredContent.',\n ),\n results: z\n .array(z.object({\n rank: z.number().int().positive().describe('1-based rank in the merged ranking.'),\n url: z.string().describe('Result URL.'),\n title: z.string().describe('Page title from the result.'),\n snippet: z.string().describe('Search snippet from the result.'),\n source_type: z\n .enum(['reddit', 'github', 'docs', 'blog', 'paper', 'qa', 'cve', 'news', 'video', 'web'])\n .describe(\n 'Heuristic source kind from the URL. 
When the LLM classifier is online its tag overrides this.',\n ),\n score: z.number().describe('Composite CTR-weighted score, normalized to 100.'),\n seen_in: z.number().int().nonnegative().describe('Number of input queries this URL appeared in.'),\n best_position: z.number().int().nonnegative().describe('Best (lowest) SERP position observed.'),\n }))\n .optional()\n .describe('Per-result structured payload \u2014 same data the markdown table renders, machine-readable.'),\n metadata: z.object({\n total_items: z.number().int().nonnegative().describe('Number of queries executed.'),\n successful: z.number().int().nonnegative().describe('Queries that returned results.'),\n failed: z.number().int().nonnegative().describe('Queries that failed.'),\n execution_time_ms: z.number().int().nonnegative().describe('Wall clock time in milliseconds.'),\n llm_classified: z.boolean().describe('Whether LLM classification was applied.'),\n llm_error: z.string().optional().describe('LLM error if classification failed and fell back to raw.'),\n scope: z.enum(['web', 'reddit', 'both']).optional().describe('Search scope used.'),\n coverage_summary: z\n .array(z.object({\n query: z.string().describe('The search query.'),\n result_count: z.number().int().nonnegative().describe('Results returned for this query.'),\n top_url: z.string().optional().describe('Domain of the top result.'),\n }))\n .optional()\n .describe('Per-query result counts and top URLs.'),\n low_yield_queries: z\n .array(z.string())\n .optional()\n .describe('Queries that produced 0-1 results.'),\n query_rewrites: z\n .array(z.object({\n original: z.string().describe('The query as the agent submitted it.'),\n rewritten: z.string().describe('The query as dispatched to Google after Phase A normalization.'),\n rules: z.array(z.string()).describe('Rule ids applied (A1=operator-char de-quote, A2=path/URL de-quote, A3=phrase-AND collapse).'),\n }))\n .optional()\n .describe('Pre-dispatch query rewrites \u2014 Phase A normalizations (operator-char and path/URL de-quote, phrase-AND \u2192 anchor + OR collapse).'),\n retried_queries: z\n .array(z.object({\n original: z.string().describe('The query as dispatched (post-Phase-A) that returned 0 results.'),\n retried_with: z.string().describe('The relaxed form retried after the empty initial response.'),\n rules: z.array(z.string()).describe('Rule ids applied (B1=strip all quotes, B2=drop site: filter).'),\n recovered_results: z.number().int().nonnegative().describe('How many hits the retry produced; 0 means the retry also failed.'),\n }))\n .optional()\n .describe('On-empty retries \u2014 Phase B relaxations applied after the initial Serper batch returned 0 results for a query.'),\n retry_error: z\n .object({\n phase: z.literal('relax-retry').describe('Retry phase that failed after the initial batch succeeded.'),\n code: z.string().describe('Structured error code from the retry batch.'),\n message: z.string().describe('Provider error message from the retry batch.'),\n retryable: z.boolean().describe('Whether the retry-batch provider failure is retryable.'),\n statusCode: z.number().int().optional().describe('Provider status code when available.'),\n })\n .optional()\n .describe('Non-fatal failure from the relaxed retry batch; initial search results were preserved.'),\n }).strict(),\n}).strict();\n\nexport type WebSearchOutput = z.infer<typeof webSearchOutputSchema>;\n"],
+ "mappings": "AAAA,SAAS,SAAS;AAEX,MAAM,wBAAwB,EAAE,OAAO;AAAA,EAC5C,SAAS,EACN;AAAA,IACC,EAAE,OAAO,EACN,IAAI,GAAG,EAAE,SAAS,oCAAoC,CAAC,EACvD,SAAS,4JAA4J;AAAA,EAC1K,EACC,IAAI,GAAG,EAAE,SAAS,wCAAwC,CAAC,EAC3D;AAAA,IACC;AAAA,EACF;AAAA,EACF,SAAS,EACN,OAAO,EACP,IAAI,GAAG,EAAE,SAAS,sCAAsC,CAAC,EACzD;AAAA,IACC;AAAA,EACF;AAAA,EACF,KAAK,EACF,QAAQ,EACR,QAAQ,KAAK,EACb,SAAS,oGAAoG;AAAA,EAChH,OAAO,EACJ,KAAK,CAAC,OAAO,UAAU,MAAM,CAAC,EAC9B,QAAQ,KAAK,EACb;AAAA,IACC;AAAA,EACF;AAAA,EACF,SAAS,EACN,QAAQ,EACR,QAAQ,KAAK,EACb;AAAA,IACC;AAAA,EACF;AACJ,CAAC,EAAE,OAAO;AAIH,MAAM,wBAAwB,EAAE,OAAO;AAAA,EAC5C,SAAS,EACN,OAAO,EACP;AAAA,IACC;AAAA,EACF;AAAA,EACF,SAAS,EACN,MAAM,EAAE,OAAO;AAAA,IACd,MAAM,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,SAAS,qCAAqC;AAAA,IAChF,KAAK,EAAE,OAAO,EAAE,SAAS,aAAa;AAAA,IACtC,OAAO,EAAE,OAAO,EAAE,SAAS,6BAA6B;AAAA,IACxD,SAAS,EAAE,OAAO,EAAE,SAAS,iCAAiC;AAAA,IAC9D,aAAa,EACV,KAAK,CAAC,UAAU,UAAU,QAAQ,QAAQ,SAAS,MAAM,OAAO,QAAQ,SAAS,KAAK,CAAC,EACvF;AAAA,MACC;AAAA,IACF;AAAA,IACF,OAAO,EAAE,OAAO,EAAE,SAAS,kDAAkD;AAAA,IAC7E,SAAS,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,+CAA+C;AAAA,IAChG,eAAe,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,uCAAuC;AAAA,EAChG,CAAC,CAAC,EACD,SAAS,EACT,SAAS,8FAAyF;AAAA,EACrG,UAAU,EAAE,OAAO;AAAA,IACjB,aAAa,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,6BAA6B;AAAA,IAClF,YAAY,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,gCAAgC;AAAA,IACpF,QAAQ,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,sBAAsB;AAAA,IACtE,mBAAmB,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,kCAAkC;AAAA,IAC7F,gBAAgB,EAAE,QAAQ,EAAE,SAAS,yCAAyC;AAAA,IAC9E,WAAW,EAAE,OAAO,EAAE,SAAS,EAAE,SAAS,0DAA0D;AAAA,IACpG,OAAO,EAAE,KAAK,CAAC,OAAO,UAAU,MAAM,CAAC,EAAE,SAAS,EAAE,SAAS,oBAAoB;AAAA,IACjF,kBAAkB,EACf,MAAM,EAAE,OAAO;AAAA,MACd,OAAO,EAAE,OAAO,EAAE,SAAS,mBAAmB;AAAA,MAC9C,cAAc,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,kCAAkC;AAAA,MACxF,SAAS,EAAE,OAAO,EAAE,SAAS,EAAE,SAAS,2BAA2B;AAAA,IACrE,CAAC,CAAC,EACD,SAAS,EACT,SAAS,uCAAuC;AAAA,IACnD,mBAAmB,EAChB,MAAM,EAAE,OAAO,CAAC,EAChB,SAAS,EACT,SAAS,oCAAoC;AAAA,IAChD,gBAAgB,EACb,MAAM,EAAE,OAAO;AAAA,MACd,UAAU,EAAE,OAAO,EAAE,SAAS,sCAAsC;AAAA,MACpE,WAAW,EAAE,OAAO,EAAE,SAAS,gEAAgE;AAAA,MAC/F,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,SAAS,6FAA6F;AAAA,IACnI,CAAC,CAAC,EACD,SAAS,EACT,SAAS,0IAAgI;AAAA,IAC5I,iBAAiB,EACd,MAAM,EAAE,OAAO;AAAA,MACd,UAAU,EAAE,OAAO,EAAE,SAAS,iEAAiE;AAAA,MAC/F,cAAc,EAAE,OAAO,EAAE,SAAS,4DAA4D;AAAA,MAC9F,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,SAAS,+DAA+D;AAAA,MACnG,mBAAmB,EAAE,OAAO,EAAE,IAAI,EAAE,YAAY,EAAE,SAAS,kEAAkE;AAAA,IAC/H,CAAC,CAAC,EACD,SAAS,EACT,SAAS,oHAA+G;AAAA,IAC3H,aAAa,EACV,OAAO;AAAA,MACN,OAAO,EAAE,QAAQ,aAAa,EAAE,SAAS,4DAA4D;AAAA,MACrG,MAAM,EAAE,OAAO,EAAE,SAAS,6CAA6C;AAAA,MACvE,SAAS,EAAE,OAAO,EAAE,SAAS,8CAA8C;AAAA,MAC3E,WAAW,EAAE,QAAQ,EAAE,SAAS,wDAAwD;AAAA,MACxF,YAAY,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,SAAS,sCAAsC;AAAA,IACzF,CAAC,EACA,SAAS,EACT,SAAS,wFAAwF;AAAA,EACtG,CAAC,EAAE,OAAO;AACZ,CAAC,EAAE,OAAO;",
  "names": []
  }
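
Unchanged in this release but visible in both versions of the map above: `webSearchOutputSchema.metadata` documents a two-phase query-relaxation pipeline (Phase A pre-dispatch rewrites, Phase B on-empty retries). A hypothetical metadata fragment showing the records it emits; the field names and rule ids come from the schema's `describe()` strings, while the queries and counts are invented:

```ts
// Illustrative only: field names and rule ids (A1-A3, B1-B2) are from the
// schema above; the concrete queries and counts are made up.
const metadataFragment = {
  query_rewrites: [
    {
      original: '"src/utils/query-relax.ts" usage',
      rewritten: 'src/utils/query-relax.ts usage', // A2: path/URL de-quote
      rules: ['A2'],
    },
  ],
  retried_queries: [
    {
      original: '"exact failing phrase" site:example.com',
      retried_with: 'exact failing phrase', // B1 strip quotes + B2 drop site:
      rules: ['B1', 'B2'],
      recovered_results: 4,
    },
  ],
};

console.log(metadataFragment);
```
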
@@ -1,5 +1,3 @@
- import { registerDeepResearchPrompt } from "../prompts/deep-research.js";
- import { registerRedditSentimentPrompt } from "../prompts/reddit-sentiment.js";
  import { registerScrapeLinksTool } from "./scrape.js";
  import { registerWebSearchTool } from "./search.js";
  import { registerStartResearchTool } from "./start-research.js";
@@ -7,8 +5,6 @@ function registerAllTools(server) {
  registerStartResearchTool(server);
  registerWebSearchTool(server);
  registerScrapeLinksTool(server);
- registerDeepResearchPrompt(server);
- registerRedditSentimentPrompt(server);
  }
  export {
  registerAllTools
@@ -1,7 +1,7 @@
  {
  "version": 3,
  "sources": ["../../../src/tools/registry.ts"],
- "sourcesContent": ["import type { MCPServer } from 'mcp-use/server';\n\nimport { registerDeepResearchPrompt } from '../prompts/deep-research.js';\nimport { registerRedditSentimentPrompt } from '../prompts/reddit-sentiment.js';\nimport { registerScrapeLinksTool } from './scrape.js';\nimport { registerWebSearchTool } from './search.js';\nimport { registerStartResearchTool } from './start-research.js';\n\nexport function registerAllTools(server: MCPServer): void {\n // 3 research tools. get-reddit-post was merged into scrape-links (auto-detects\n // reddit.com URLs). search-reddit was replaced by web-search with scope=\"reddit\".\n registerStartResearchTool(server);\n registerWebSearchTool(server);\n registerScrapeLinksTool(server);\n registerDeepResearchPrompt(server);\n registerRedditSentimentPrompt(server);\n}\n"],
- "mappings": "AAEA,SAAS,kCAAkC;AAC3C,SAAS,qCAAqC;AAC9C,SAAS,+BAA+B;AACxC,SAAS,6BAA6B;AACtC,SAAS,iCAAiC;AAEnC,SAAS,iBAAiB,QAAyB;AAGxD,4BAA0B,MAAM;AAChC,wBAAsB,MAAM;AAC5B,0BAAwB,MAAM;AAC9B,6BAA2B,MAAM;AACjC,gCAA8B,MAAM;AACtC;",
+ "sourcesContent": ["import type { MCPServer } from 'mcp-use/server';\n\nimport { registerScrapeLinksTool } from './scrape.js';\nimport { registerWebSearchTool } from './search.js';\nimport { registerStartResearchTool } from './start-research.js';\n\nexport function registerAllTools(server: MCPServer): void {\n // 3 research tools. get-reddit-post was merged into scrape-links (auto-detects\n // reddit.com URLs). search-reddit was replaced by web-search with scope=\"reddit\".\n registerStartResearchTool(server);\n registerWebSearchTool(server);\n registerScrapeLinksTool(server);\n}\n"],
+ "mappings": "AAEA,SAAS,+BAA+B;AACxC,SAAS,6BAA6B;AACtC,SAAS,iCAAiC;AAEnC,SAAS,iBAAiB,QAAyB;AAGxD,4BAA0B,MAAM;AAChC,wBAAsB,MAAM;AAC5B,0BAAwB,MAAM;AAChC;",
  "names": []
  }
@@ -563,7 +563,7 @@ async function handleWebSearch(params, reporter = NOOP_REPORTER, searchExecutor
  ---
  *${formatDuration(executionTime)} | ${aggregation.totalUniqueUrls} unique URLs${llmClassified ? " | LLM classified" : ""}*`;
  const fullMarkdown = markdown + footer;
- return toolSuccess(fullMarkdown, { results, metadata });
+ return toolSuccess(fullMarkdown, { content: fullMarkdown, results, metadata });
  } catch (error) {
  return buildWebSearchError(error, params, startTime);
  }
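
The one-line handler change above is what feeds the new schema field: the same `fullMarkdown` now travels both as the tool's text content and inside `structuredContent`. A sketch of what a `toolSuccess`-style helper plausibly does with its two arguments; the real implementation lives in `./mcp-helpers.js` and is not shown in this diff, so the details below are assumed:

```ts
// Assumed shape of the helper, inferred from how the handler calls it.
// The actual mcp-helpers.js implementation may differ.
function toolSuccess<T extends object>(markdown: string, structured: T) {
  return {
    isError: false as const,
    content: [{ type: 'text' as const, text: markdown }],
    structuredContent: structured, // after 6.0.15 this carries content: fullMarkdown too
  };
}

const result = toolSuccess('# report\n...', { content: '# report\n...', results: [], metadata: {} });
console.log(result.structuredContent.content === result.content[0].text); // true
```
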
@@ -1,7 +1,7 @@
  {
  "version": 3,
  "sources": ["../../../src/tools/search.ts"],
- "sourcesContent": ["/**\n * Web Search Tool Handler\n * NEVER throws - always returns structured response for graceful degradation\n */\n\nimport type { MCPServer } from 'mcp-use/server';\n\nimport { getCapabilities, getMissingEnvMessage } from '../config/index.js';\nimport {\n webSearchOutputSchema,\n webSearchParamsSchema,\n type WebSearchParams,\n type WebSearchOutput,\n} from '../schemas/web-search.js';\nimport { SearchClient, type MultipleSearchResponse } from '../clients/search.js';\nimport {\n aggregateAndRank,\n generateUnifiedOutput,\n} from '../utils/url-aggregator.js';\nimport {\n createLLMProcessor,\n classifySearchResults,\n suggestRefineQueriesForRawMode,\n type ClassificationEntry,\n type ClassificationResult,\n type RefineQuerySuggestion,\n} from '../services/llm-processor.js';\nimport { classifyError, type StructuredError } from '../utils/errors.js';\nimport { classifySourceByUrl } from '../utils/source-type.js';\nimport {\n mcpLog,\n formatError,\n formatDuration,\n} from './utils.js';\nimport {\n createToolReporter,\n NOOP_REPORTER,\n toolFailure,\n toolSuccess,\n toToolResponse,\n type ToolExecutionResult,\n type ToolReporter,\n} from './mcp-helpers.js';\nimport { sanitizeSuggestion } from '../utils/sanitize.js';\nimport {\n normalizeQueryForDispatch,\n relaxQueryForRetry,\n} from '../utils/query-relax.js';\n\n// --- Internal types ---\n\ninterface SearchAggregation {\n readonly rankedUrls: ReturnType<typeof aggregateAndRank>['rankedUrls'];\n readonly totalUniqueUrls: number;\n readonly frequencyThreshold: number;\n readonly thresholdNote?: string;\n}\n\nexport type SearchResponse = MultipleSearchResponse;\nexport type SearchExecutor = (queries: string[]) => Promise<SearchResponse>;\n\ntype SearchFailurePhase = 'initial' | 'relax-retry';\ntype SearchResultScope = 'web' | 'reddit';\n\n// --- Helpers ---\n\n/** Reddit post permalink: /r/{sub}/comments/{id}/ \u2014 drops subreddit\n * homepages, /rising, /new, /top, etc. so only post URLs reach the agent.\n * See mcp-revisions/tool-surface/02-extend-web-search-with-reddit-scope.md. */\nconst REDDIT_POST_PERMALINK = /\\/r\\/[^/]+\\/comments\\/[a-z0-9]+\\//i;\nconst REDDIT_HOST = /(?:^|\\.)reddit\\.com$/i;\n\ninterface ScopedQuery {\n query: string;\n resultScope: SearchResultScope;\n dropSiteOnRetry: boolean;\n}\n\nfunction redditScopedQuery(query: string): string {\n return /\\bsite:reddit\\.com\\b/i.test(query) ? query : `${query} site:reddit.com`;\n}\n\nfunction buildScopedQueries(queries: string[], scope: 'web' | 'reddit' | 'both'): ScopedQuery[] {\n if (scope === 'web') {\n return queries.map((query) => ({ query, resultScope: 'web', dropSiteOnRetry: true }));\n }\n\n const reddited = queries.map((q) =>\n ({ query: redditScopedQuery(q), resultScope: 'reddit' as const, dropSiteOnRetry: false }),\n );\n\n if (scope === 'reddit') return reddited;\n\n return [\n ...queries.map((query) => ({ query, resultScope: 'web' as const, dropSiteOnRetry: true })),\n ...reddited,\n ];\n}\n\nasync function executeSearches(queries: string[]): Promise<SearchResponse> {\n const client = new SearchClient();\n return client.searchMultiple(queries);\n}\n\ninterface QueryRewriteRecord {\n original: string;\n rewritten: string;\n rules: string[];\n}\n\ninterface RetriedQueryRecord {\n original: string;\n retried_with: string;\n rules: string[];\n recovered_results: number;\n}\n\n/** Run Serper, then for each query that returned 0 results build a relaxed\n * retry (Phase B) and reissue them in a single second batch. 
Replace the\n * empty slot with the retry's results when the retry recovered \u22651 hit, but\n * keep the original query string in the slot so downstream aggregation and\n * follow-up rendering stay consistent. */\nasync function executeWithRelaxRetry(\n dispatched: string[],\n reporter: ToolReporter,\n searchExecutor: SearchExecutor = executeSearches,\n retryOptions: { readonly dropSiteOnRetry?: readonly boolean[] } = {},\n): Promise<{\n response: SearchResponse;\n retried: RetriedQueryRecord[];\n failurePhase?: SearchFailurePhase;\n retryError?: StructuredError;\n}> {\n const initial = await searchExecutor(dispatched);\n\n if (initial.error) {\n return { response: initial, retried: [], failurePhase: 'initial' };\n }\n\n const emptyIndices = initial.searches\n .map((s, i) => (s.results.length === 0 ? i : -1))\n .filter((i) => i !== -1);\n\n if (emptyIndices.length === 0) {\n return { response: initial, retried: [] };\n }\n\n interface Plan { index: number; original: string; relaxed: string; rules: string[] }\n const plans: Plan[] = [];\n for (const idx of emptyIndices) {\n const dq = dispatched[idx];\n if (typeof dq !== 'string') continue;\n const r = relaxQueryForRetry(dq, { dropSite: retryOptions.dropSiteOnRetry?.[idx] ?? true });\n if (r.changed && r.rewritten !== dq) {\n plans.push({ index: idx, original: dq, relaxed: r.rewritten, rules: [...r.rules] });\n }\n }\n\n if (plans.length === 0) {\n return { response: initial, retried: [] };\n }\n\n mcpLog(\n 'info',\n `${plans.length}/${emptyIndices.length} empty-result queries eligible for relaxation retry`,\n 'search',\n );\n await reporter.log(\n 'info',\n `${plans.length} queries returned 0 results; retrying with relaxation`,\n );\n\n const retryResp = await searchExecutor(plans.map((p) => p.relaxed));\n const retried: RetriedQueryRecord[] = [];\n const retryByIndex = new Map<number, SearchResponse['searches'][number]>();\n\n plans.forEach((plan, i) => {\n const r = retryResp.searches[i];\n if (r) retryByIndex.set(plan.index, r);\n retried.push({\n original: plan.original,\n retried_with: plan.relaxed,\n rules: plan.rules,\n recovered_results: r?.results.length ?? 0,\n });\n });\n\n if (retryResp.error) {\n mcpLog(\n 'warning',\n `Relaxed retry batch failed; preserving initial search results: ${retryResp.error.message}`,\n 'search',\n );\n await reporter.log(\n 'warning',\n `search_relax_retry_failed: ${retryResp.error.message}`,\n );\n return {\n response: initial,\n retried,\n retryError: retryResp.error,\n };\n }\n\n const mergedSearches = initial.searches.map((s, idx) => {\n const r = retryByIndex.get(idx);\n if (r && r.results.length > 0) {\n return { ...r, query: s.query };\n }\n return s;\n });\n\n return {\n response: { ...initial, searches: mergedSearches },\n retried,\n };\n}\n\nfunction filterScopedSearches(\n response: SearchResponse,\n scope: 'web' | 'reddit' | 'both',\n resultScopes: readonly SearchResultScope[] = [],\n): SearchResponse {\n if (scope === 'web') return response;\n const filtered = response.searches.map((search, index) => {\n const resultScope = resultScopes[index] ?? (scope === 'reddit' ? 
'reddit' : 'web');\n return {\n ...search,\n results: search.results.filter((r) => {\n let host: string;\n try { host = new URL(r.link).hostname; } catch { return true; }\n if (resultScope === 'reddit') {\n return REDDIT_HOST.test(host) && REDDIT_POST_PERMALINK.test(r.link);\n }\n // Web-side results pass through; reddit URLs still must be post permalinks.\n if (!REDDIT_HOST.test(host)) return true;\n return REDDIT_POST_PERMALINK.test(r.link);\n }),\n };\n });\n return { ...response, searches: filtered };\n}\n\nfunction processResults(response: SearchResponse): {\n aggregation: SearchAggregation;\n} {\n const aggregation = aggregateAndRank(response.searches, 5);\n return { aggregation };\n}\n\n// --- Raw output (traditional unified ranked list) ---\n\nfunction buildRawOutput(\n queries: string[],\n aggregation: SearchAggregation,\n searches: SearchResponse['searches'],\n verbose: boolean = false,\n): string {\n return generateUnifiedOutput(\n aggregation.rankedUrls, queries, searches,\n aggregation.totalUniqueUrls,\n aggregation.frequencyThreshold, aggregation.thresholdNote,\n verbose,\n );\n}\n\nfunction buildSignalsSection(\n aggregation: SearchAggregation,\n searches: SearchResponse['searches'],\n totalQueries: number,\n): string {\n const coverageCount = searches.filter((search) => search.results.length >= 3).length;\n const lowYield = searches\n .filter((search) => search.results.length <= 1)\n .map((search) => `\"${search.query}\"`);\n const consensusCount = aggregation.rankedUrls.filter((url) => url.isConsensus).length;\n\n const lines = [\n '**Signals**',\n `- Coverage: ${coverageCount}/${totalQueries} queries returned \u22653 results`,\n `- Consensus URLs: ${consensusCount}`,\n ];\n\n if (lowYield.length > 0) {\n lines.push(`- Low-yield: ${lowYield.join(', ')}`);\n }\n\n return lines.join('\\n');\n}\n\nexport function buildSuggestedFollowUpsSection(\n refineQueries: Array<{ query: string; rationale?: string; gap_id?: number; gap_description?: string }> | undefined,\n): string {\n if (!refineQueries || refineQueries.length === 0) {\n return '';\n }\n\n const lines = ['## Suggested follow-up searches', ''];\n\n for (const item of refineQueries) {\n const query = sanitizeSuggestion(item.query ?? '');\n if (!query) continue;\n const rationale = sanitizeSuggestion(item.rationale ?? '');\n const gapTag = typeof item.gap_id === 'number'\n ? ` _(closes gap [${item.gap_id}])_`\n : item.gap_description\n ? ` _(${sanitizeSuggestion(item.gap_description)})_`\n : '';\n lines.push(rationale\n ? `- ${query} \u2014 ${rationale}${gapTag}`\n : `- ${query}${gapTag}`,\n );\n }\n\n return lines.length === 2 ? '' : lines.join('\\n');\n}\n\nexport function appendSignalsAndFollowUps(\n markdown: string,\n signalsSection: string,\n refineQueries: RefineQuerySuggestion[] | undefined,\n options: { includeSignals?: boolean } = {},\n): string {\n const includeSignals = options.includeSignals ?? false;\n const sections = [markdown];\n if (includeSignals && signalsSection) {\n sections.push('', '---', signalsSection);\n }\n const followUps = buildSuggestedFollowUpsSection(refineQueries);\n if (followUps) {\n sections.push('', followUps);\n }\n return sections.join('\\n');\n}\n\n// --- \"Start here\" section ---\n//\n// Surfaces the best 3-5 URLs at the top of the classified response so an agent\n// skimming the first screen sees them before tier tables. 
Deterministic: uses\n// existing `tier` + `rank` + `reason` from the classifier, no extra LLM call.\n//\n// Algorithm: take HIGHLY_RELEVANT by rank up to MAX_START_HERE; if fewer than\n// MIN_START_HERE, pad from top MAYBE_RELEVANT; skip entirely if no entries\n// above OTHER.\n\nconst MIN_START_HERE = 3;\nconst MAX_START_HERE = 5;\n\n/** Minimal structural shape \u2014 avoids coupling to private `RankedUrl` type. */\ninterface StartHereCandidate {\n readonly rank: number;\n readonly url: string;\n readonly title: string;\n}\n\ninterface StartHereTiers {\n readonly high: readonly StartHereCandidate[];\n readonly maybe: readonly StartHereCandidate[];\n}\n\nexport function buildStartHereSection(\n tiers: StartHereTiers,\n entryByRank: Map<number, ClassificationEntry>,\n opts: { min?: number; max?: number } = {},\n): string {\n const min = opts.min ?? MIN_START_HERE;\n const max = opts.max ?? MAX_START_HERE;\n\n const picks: Array<{ candidate: StartHereCandidate; tier: 'HIGHLY_RELEVANT' | 'MAYBE_RELEVANT' }> = [];\n\n for (const candidate of tiers.high) {\n if (picks.length >= max) break;\n picks.push({ candidate, tier: 'HIGHLY_RELEVANT' });\n }\n\n if (picks.length < min) {\n const target = Math.min(min, max);\n for (const candidate of tiers.maybe) {\n if (picks.length >= target) break;\n picks.push({ candidate, tier: 'MAYBE_RELEVANT' });\n }\n }\n\n if (picks.length === 0) return '';\n\n const lines: string[] = [];\n lines.push('## Start here \u2014 best candidates for your extract');\n picks.forEach((pick, i) => {\n const entry = entryByRank.get(pick.candidate.rank);\n const reason = entry?.reason && entry.reason.trim().length > 0 ? entry.reason : '\u2014';\n let domain: string;\n try {\n domain = new URL(pick.candidate.url).hostname.replace(/^www\\./, '');\n } catch {\n domain = pick.candidate.url;\n }\n lines.push(\n `${i + 1}. **[${pick.candidate.title}](${pick.candidate.url})** \u2014 ${domain} \u2014 ${reason} *(${pick.tier}, rank ${pick.candidate.rank})*`,\n );\n });\n return lines.join('\\n');\n}\n\n// --- Classified output (3-tier LLM-classified table) ---\n\nfunction buildClassifiedOutput(\n classification: ClassificationResult,\n aggregation: SearchAggregation,\n extract: string,\n searches: SearchResponse['searches'],\n totalQueries: number,\n verbose: boolean = false,\n): string {\n const rankedUrls = aggregation.rankedUrls;\n\n // Build tier \u2192 entries mapping (keep url data alongside classifier metadata)\n const entryByRank = new Map(classification.results.map((r) => [r.rank, r]));\n\n const tiers = {\n high: [] as typeof rankedUrls,\n maybe: [] as typeof rankedUrls,\n other: [] as typeof rankedUrls,\n };\n\n for (const url of rankedUrls) {\n const entry = entryByRank.get(url.rank);\n const tier = entry?.tier;\n if (tier === 'HIGHLY_RELEVANT') {\n tiers.high.push(url);\n } else if (tier === 'MAYBE_RELEVANT') {\n tiers.maybe.push(url);\n } else {\n tiers.other.push(url);\n }\n }\n\n const lines: string[] = [];\n\n // Header with generated title, synthesis, and confidence\n lines.push(`## ${classification.title}`);\n lines.push(`> Looking for: ${extract}`);\n lines.push(`> ${totalQueries} queries \u2192 ${rankedUrls.length} URLs \u2192 ${tiers.high.length} highly relevant, ${tiers.maybe.length} possibly relevant`);\n if (classification.confidence) {\n const confReason = classification.confidence_reason ? 
` \u2014 ${classification.confidence_reason}` : '';\n lines.push(`> Confidence: \\`${classification.confidence}\\`${confReason}`);\n }\n lines.push('');\n\n // \"Start here\" block: surface the top 3-5 URLs above the synthesis so an\n // agent skimming the first screen sees scrape candidates before prose.\n const startHere = buildStartHereSection(\n { high: tiers.high, maybe: tiers.maybe },\n entryByRank,\n );\n if (startHere) {\n lines.push(startHere);\n lines.push('');\n }\n\n lines.push(`**Summary:** ${classification.synthesis}`);\n lines.push('');\n\n // Helper: render one row with optional source_type + reason\n const renderRichRow = (url: typeof rankedUrls[number]): string => {\n const entry = entryByRank.get(url.rank);\n const coveragePct = Math.round(url.coverageRatio * 100);\n const seenIn = `${url.frequency}/${totalQueries} (${coveragePct}%)`;\n const sourceType = entry?.source_type ? `\\`${entry.source_type}\\`` : '\u2014';\n const reason = entry?.reason ? entry.reason.replace(/\\|/g, '\\\\|') : '\u2014';\n return `| ${url.rank} | [${url.title}](${url.url}) | ${sourceType} | ${seenIn} | ${reason} |`;\n };\n\n // Highly Relevant tier\n if (tiers.high.length > 0) {\n lines.push(`### Highly Relevant (${tiers.high.length})`);\n lines.push('| # | URL | Source | Seen in | Why |');\n lines.push('|---|-----|--------|---------|-----|');\n for (const url of tiers.high) lines.push(renderRichRow(url));\n lines.push('');\n }\n\n // Maybe Relevant tier\n if (tiers.maybe.length > 0) {\n lines.push(`### Maybe Relevant (${tiers.maybe.length})`);\n lines.push('| # | URL | Source | Seen in | Why |');\n lines.push('|---|-----|--------|---------|-----|');\n for (const url of tiers.maybe) lines.push(renderRichRow(url));\n lines.push('');\n }\n\n // Other tier \u2014 with query attribution\n if (tiers.other.length > 0) {\n lines.push(`### Other Results (${tiers.other.length})`);\n lines.push('| # | URL | Source | Score | Queries |');\n lines.push('|---|-----|--------|-------|---------|');\n for (const url of tiers.other) {\n const entry = entryByRank.get(url.rank);\n const queryList = url.queries.map((q) => `\"${q}\"`).join(', ');\n const sourceType = entry?.source_type ? 
`\\`${entry.source_type}\\`` : '\u2014';\n let domain: string;\n try {\n domain = new URL(url.url).hostname.replace(/^www\\./, '');\n } catch {\n domain = url.url;\n }\n lines.push(`| ${url.rank} | ${domain} | ${sourceType} | ${url.score.toFixed(1)} | ${queryList} |`);\n }\n lines.push('');\n }\n\n // Signals block is gated behind verbose \u2014 it duplicates info already\n // present in the per-row metadata for callers who care.\n // See: docs/code-review/context/05-output-formatting-patterns.md.\n if (verbose) {\n lines.push(buildSignalsSection(aggregation, searches, totalQueries));\n }\n\n // Gaps section \u2014 what the current results don't answer\n if (classification.gaps && classification.gaps.length > 0) {\n lines.push('');\n lines.push('## Gaps');\n for (const gap of classification.gaps) {\n lines.push(`- **[${gap.id}]** ${gap.description}`);\n }\n }\n\n const followUps = buildSuggestedFollowUpsSection(classification.refine_queries);\n if (followUps) {\n lines.push('');\n lines.push(followUps);\n }\n\n return lines.join('\\n');\n}\n\n// --- Metadata builder ---\n\nfunction buildMetadata(\n aggregation: SearchAggregation,\n executionTime: number,\n totalQueries: number,\n searches: SearchResponse['searches'],\n llmClassified: boolean,\n scope: 'web' | 'reddit' | 'both',\n llmError?: string,\n queryRewrites?: QueryRewriteRecord[],\n retriedQueries?: RetriedQueryRecord[],\n retryError?: StructuredError,\n) {\n const coverageSummary = searches.map(s => {\n let topDomain: string | undefined;\n const topResult = s.results[0];\n if (topResult) {\n try { topDomain = new URL(topResult.link).hostname.replace(/^www\\./, ''); } catch { /* ignore */ }\n }\n return { query: s.query, result_count: s.results.length, top_url: topDomain };\n });\n const lowYieldQueries = searches\n .filter(s => s.results.length <= 1)\n .map(s => s.query);\n const successfulQueries = searches.filter(s => s.results.length > 0).length;\n\n return {\n total_items: totalQueries,\n successful: successfulQueries,\n failed: Math.max(totalQueries - successfulQueries, 0),\n execution_time_ms: executionTime,\n llm_classified: llmClassified,\n scope,\n ...(llmError ? { llm_error: llmError } : {}),\n coverage_summary: coverageSummary,\n ...(lowYieldQueries.length > 0 ? { low_yield_queries: lowYieldQueries } : {}),\n ...(queryRewrites && queryRewrites.length > 0 ? { query_rewrites: queryRewrites } : {}),\n ...(retriedQueries && retriedQueries.length > 0 ? { retried_queries: retriedQueries } : {}),\n ...(retryError\n ? {\n retry_error: {\n phase: 'relax-retry' as const,\n code: retryError.code,\n message: retryError.message,\n retryable: retryError.retryable,\n ...(typeof retryError.statusCode === 'number' ? { statusCode: retryError.statusCode } : {}),\n },\n }\n : {}),\n };\n}\n\nfunction buildStructuredResults(\n aggregation: SearchAggregation,\n llmTagsByRank?: Map<number, string>,\n): Array<{\n rank: number;\n url: string;\n title: string;\n snippet: string;\n source_type: 'reddit' | 'github' | 'docs' | 'blog' | 'paper' | 'qa' | 'cve' | 'news' | 'video' | 'web';\n score: number;\n seen_in: number;\n best_position: number;\n}> {\n return aggregation.rankedUrls.map((row) => {\n // LLM tag wins when present; heuristic is the always-on fallback. 
See:\n // mcp-revisions/output-shaping/06-source-type-tagging-without-llm.md.\n const llmTag = llmTagsByRank?.get(row.rank);\n const heuristic = classifySourceByUrl(row.url);\n return {\n rank: row.rank,\n url: row.url,\n title: row.title,\n snippet: row.snippet,\n source_type: ((llmTag as typeof heuristic) ?? heuristic),\n score: Number(row.score.toFixed(2)),\n seen_in: row.frequency,\n best_position: row.bestPosition,\n };\n });\n}\n\n// --- Error builder ---\n\nfunction isStructuredError(error: unknown): error is StructuredError {\n if (typeof error !== 'object' || error === null) return false;\n const record = error as Record<string, unknown>;\n return typeof record.code === 'string'\n && typeof record.message === 'string'\n && typeof record.retryable === 'boolean';\n}\n\nfunction normalizeStructuredError(error: unknown): StructuredError {\n return isStructuredError(error) ? error : classifyError(error);\n}\n\nfunction formatSearchFailureMessage(\n error: StructuredError,\n phase?: SearchFailurePhase,\n): string {\n if (phase === 'initial') {\n return `Search provider failed during initial batch: ${error.message}`;\n }\n\n if (phase === 'relax-retry') {\n return `Search provider failed during relaxed retry batch: ${error.message}`;\n }\n\n return error.message;\n}\n\nfunction buildWebSearchError(\n error: unknown,\n params: WebSearchParams,\n startTime: number,\n phase?: SearchFailurePhase,\n): ToolExecutionResult<WebSearchOutput> {\n const structuredError = normalizeStructuredError(error);\n const message = formatSearchFailureMessage(structuredError, phase);\n const executionTime = Date.now() - startTime;\n\n mcpLog('error', `web-search: ${message}`, 'search');\n\n const errorContent = formatError({\n code: structuredError.code,\n message,\n retryable: structuredError.retryable,\n toolName: 'web-search',\n howToFix: ['Verify SERPER_API_KEY is set correctly'],\n alternatives: [\n 'web-search(queries=[\"topic recommendations\"], extract=\"...\", scope: \"reddit\") \u2014 Reddit-only post permalinks via the same backend',\n 'scrape-links(urls=[...], extract=\"...\") \u2014 if you have URLs from prior steps, scrape them now',\n ],\n });\n\n return toolFailure(\n `${errorContent}\\n\\nExecution time: ${formatDuration(executionTime)}\\nQueries: ${params.queries.length}`,\n );\n}\n\n// --- Main handler ---\n\nexport async function handleWebSearch(\n params: WebSearchParams,\n reporter: ToolReporter = NOOP_REPORTER,\n searchExecutor: SearchExecutor = executeSearches,\n): Promise<ToolExecutionResult<WebSearchOutput>> {\n const startTime = Date.now();\n\n try {\n const scopedQueries = buildScopedQueries(params.queries, params.scope);\n const effectiveQueries = scopedQueries.map((entry) => entry.query);\n if (params.scope !== 'web') {\n mcpLog('info', `Searching scope=${params.scope}: ${params.queries.length} input queries \u2192 ${effectiveQueries.length} dispatched`, 'search');\n } else {\n mcpLog('info', `Searching for ${params.queries.length} query/queries`, 'search');\n }\n await reporter.log('info', `Searching for ${effectiveQueries.length} query/queries (scope=${params.scope})`);\n await reporter.progress(15, 100, 'Submitting search queries');\n\n // Phase A \u2014 pre-dispatch normalizer. Rewrites the small fraction of\n // queries Google was statistically going to mis-handle (3+ phrase AND,\n // operator chars in quotes, paths in quotes). 
See src/utils/query-relax.ts.\n const dispatchPlan = effectiveQueries.map((q) => {\n const r = normalizeQueryForDispatch(q);\n return { original: q, dispatched: r.rewritten, rules: [...r.rules], changed: r.changed };\n });\n const dispatchedQueries = dispatchPlan.map((p) => p.dispatched);\n const resultScopes = scopedQueries.map((entry) => entry.resultScope);\n const dropSiteOnRetry = scopedQueries.map((entry) => entry.dropSiteOnRetry);\n const queryRewrites: QueryRewriteRecord[] = dispatchPlan\n .filter((p) => p.changed)\n .map((p) => ({ original: p.original, rewritten: p.dispatched, rules: p.rules }));\n\n if (queryRewrites.length > 0) {\n mcpLog(\n 'info',\n `Pre-dispatch normalized ${queryRewrites.length}/${effectiveQueries.length} queries`,\n 'search',\n );\n await reporter.log(\n 'info',\n `Normalized ${queryRewrites.length} queries pre-dispatch`,\n );\n }\n\n // Phase B \u2014 on-empty retry: any query returning 0 results gets one\n // relaxed retry (drop quotes, drop site:). Recovered hits replace the\n // empty slot transparently.\n const {\n response: rawResponse,\n retried: retriedQueries,\n failurePhase,\n retryError,\n } = await executeWithRelaxRetry(\n dispatchedQueries,\n reporter,\n searchExecutor,\n { dropSiteOnRetry },\n );\n\n if (rawResponse.error) {\n await reporter.log('error', `search_provider_failed: ${rawResponse.error.message}`);\n return buildWebSearchError(rawResponse.error, params, startTime, failurePhase);\n }\n\n const response = filterScopedSearches(rawResponse, params.scope, resultScopes);\n await reporter.progress(50, 100, 'Collected search results');\n\n const { aggregation } = processResults(response);\n await reporter.log(\n 'info',\n `Collected ${aggregation.totalUniqueUrls} unique URLs across ${response.totalQueries} queries`,\n );\n\n // Decide: raw output or LLM classification\n const useRaw = params.raw;\n const llmProcessor = createLLMProcessor();\n\n let markdown: string;\n let llmClassified = false;\n let llmError: string | undefined;\n\n if (useRaw || !llmProcessor) {\n // Raw path: traditional unified ranked list\n if (!useRaw && !llmProcessor) {\n llmError = 'LLM unavailable (LLM_API_KEY / LLM_BASE_URL / LLM_MODEL not set). 
Falling back to raw output.';\n mcpLog('warning', llmError, 'search');\n // mcp-revisions/llm-degradation/01: surface degraded mode to the client.\n await reporter.log('warning', 'llm_classifier_unreachable: planner not configured; raw ranked list returned');\n }\n let rawRefineQueries: RefineQuerySuggestion[] | undefined;\n if (useRaw && llmProcessor) {\n const refineResult = await suggestRefineQueriesForRawMode(\n aggregation.rankedUrls,\n params.extract,\n params.queries,\n llmProcessor,\n );\n rawRefineQueries = refineResult.result;\n }\n markdown = appendSignalsAndFollowUps(\n buildRawOutput(params.queries, aggregation, response.searches, params.verbose),\n buildSignalsSection(aggregation, response.searches, response.totalQueries),\n rawRefineQueries,\n { includeSignals: params.verbose },\n );\n await reporter.progress(80, 100, 'Ranking search results');\n } else {\n // LLM classification path\n await reporter.progress(65, 100, 'Classifying results by relevance');\n const classification = await classifySearchResults(\n aggregation.rankedUrls,\n params.extract,\n response.totalQueries,\n llmProcessor,\n params.queries,\n );\n\n if (classification.result) {\n markdown = buildClassifiedOutput(\n classification.result, aggregation, params.extract, response.searches, response.totalQueries, params.verbose,\n );\n llmClassified = true;\n await reporter.progress(85, 100, 'Formatted classified results');\n } else {\n // Classification failed \u2014 fall back to raw\n llmError = classification.error ?? 'Unknown classification error';\n mcpLog('warning', `Classification failed, falling back to raw: ${llmError}`, 'search');\n // mcp-revisions/llm-degradation/01: surface degraded mode to the client.\n await reporter.log('warning', `llm_classifier_unreachable: ${llmError}`);\n markdown = appendSignalsAndFollowUps(\n buildRawOutput(params.queries, aggregation, response.searches, params.verbose),\n buildSignalsSection(aggregation, response.searches, response.totalQueries),\n undefined,\n { includeSignals: params.verbose },\n );\n await reporter.progress(85, 100, 'Classification failed, using raw output');\n }\n }\n\n const executionTime = Date.now() - startTime;\n const metadata = buildMetadata(\n aggregation, executionTime, response.totalQueries, response.searches, llmClassified, params.scope, llmError,\n queryRewrites, retriedQueries, retryError,\n );\n\n // Build per-row structured results so capability-aware clients can\n // index into `structuredContent.results` rather than regex-scrape the\n // markdown table. The LLM tag wins when present; heuristic is the\n // always-on fallback.\n const llmTagsByRank = new Map<number, string>();\n // (When classification succeeds the source_type per-row is populated\n // inside buildClassifiedOutput via the entry.source_type field \u2014 but\n // we don't have a direct handle on it here without a refactor. The\n // heuristic alone covers the structuredContent shape correctly; the\n // LLM-tagged variant remains in the markdown body.)\n const results = buildStructuredResults(aggregation, llmTagsByRank);\n\n mcpLog('info', `Search completed: ${aggregation.rankedUrls.length} URLs, classified=${llmClassified}`, 'search');\n await reporter.log('info', `Search completed with ${aggregation.rankedUrls.length} URLs (classified: ${llmClassified})`);\n\n const footer = `\\n---\\n*${formatDuration(executionTime)} | ${aggregation.totalUniqueUrls} unique URLs${llmClassified ? 
' | LLM classified' : ''}*`;\n const fullMarkdown = markdown + footer;\n\n return toolSuccess(fullMarkdown, { results, metadata });\n } catch (error) {\n return buildWebSearchError(error, params, startTime);\n }\n}\n\nexport function registerWebSearchTool(server: MCPServer): void {\n server.tool(\n {\n name: 'web-search',\n title: 'Web Search',\n description:\n 'Fan out Google queries in parallel. One call carries up to 50 queries in a flat `queries` array \u2014 pack diverse facets (not paraphrases) into a single call. Call me AGGRESSIVELY across a session: 2\u20134 rounds is normal, 1 is underuse. After each pass, read `gaps[]` + `refine_queries[]` and fire another round with the new terms. Safe to call multiple times in parallel in the same turn for orthogonal subtopics. `scope`: `\"reddit\"` (server appends `site:reddit.com` + filters to post permalinks \u2014 use for sentiment / migration / lived experience), `\"web\"` default (spec / bug / pricing / CVE / API), `\"both\"` (fan each query across both \u2014 use when opinion-heavy AND needs official sources). Returns a tiered Markdown report (HIGHLY_RELEVANT / MAYBE_RELEVANT / OTHER) + grounded synthesis with `[rank]` citations + `## Gaps` + `## Suggested follow-up searches` tied to gap ids. Set `raw=true` to skip classification.',\n schema: webSearchParamsSchema,\n outputSchema: webSearchOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: true,\n },\n },\n async (args, ctx) => {\n if (!getCapabilities().search) {\n return toToolResponse(toolFailure(getMissingEnvMessage('search')));\n }\n\n const reporter = createToolReporter(ctx, 'web-search');\n const result = await handleWebSearch(args, reporter);\n\n await reporter.progress(100, 100, result.isError ? 'Search failed' : 'Search complete');\n return toToolResponse(result);\n },\n );\n}\n"],
5
- "mappings": "AAOA,SAAS,iBAAiB,4BAA4B;AACtD;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP,SAAS,oBAAiD;AAC1D;AAAA,EACE;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OAIK;AACP,SAAS,qBAA2C;AACpD,SAAS,2BAA2B;AACpC;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAGK;AACP,SAAS,0BAA0B;AACnC;AAAA,EACE;AAAA,EACA;AAAA,OACK;AAsBP,MAAM,wBAAwB;AAC9B,MAAM,cAAc;AAQpB,SAAS,kBAAkB,OAAuB;AAChD,SAAO,wBAAwB,KAAK,KAAK,IAAI,QAAQ,GAAG,KAAK;AAC/D;AAEA,SAAS,mBAAmB,SAAmB,OAAiD;AAC9F,MAAI,UAAU,OAAO;AACnB,WAAO,QAAQ,IAAI,CAAC,WAAW,EAAE,OAAO,aAAa,OAAO,iBAAiB,KAAK,EAAE;AAAA,EACtF;AAEA,QAAM,WAAW,QAAQ;AAAA,IAAI,CAAC,OAC3B,EAAE,OAAO,kBAAkB,CAAC,GAAG,aAAa,UAAmB,iBAAiB,MAAM;AAAA,EACzF;AAEA,MAAI,UAAU,SAAU,QAAO;AAE/B,SAAO;AAAA,IACL,GAAG,QAAQ,IAAI,CAAC,WAAW,EAAE,OAAO,aAAa,OAAgB,iBAAiB,KAAK,EAAE;AAAA,IACzF,GAAG;AAAA,EACL;AACF;AAEA,eAAe,gBAAgB,SAA4C;AACzE,QAAM,SAAS,IAAI,aAAa;AAChC,SAAO,OAAO,eAAe,OAAO;AACtC;AAoBA,eAAe,sBACb,YACA,UACA,iBAAiC,iBACjC,eAAkE,CAAC,GAMlE;AACD,QAAM,UAAU,MAAM,eAAe,UAAU;AAE/C,MAAI,QAAQ,OAAO;AACjB,WAAO,EAAE,UAAU,SAAS,SAAS,CAAC,GAAG,cAAc,UAAU;AAAA,EACnE;AAEA,QAAM,eAAe,QAAQ,SAC1B,IAAI,CAAC,GAAG,MAAO,EAAE,QAAQ,WAAW,IAAI,IAAI,EAAG,EAC/C,OAAO,CAAC,MAAM,MAAM,EAAE;AAEzB,MAAI,aAAa,WAAW,GAAG;AAC7B,WAAO,EAAE,UAAU,SAAS,SAAS,CAAC,EAAE;AAAA,EAC1C;AAGA,QAAM,QAAgB,CAAC;AACvB,aAAW,OAAO,cAAc;AAC9B,UAAM,KAAK,WAAW,GAAG;AACzB,QAAI,OAAO,OAAO,SAAU;AAC5B,UAAM,IAAI,mBAAmB,IAAI,EAAE,UAAU,aAAa,kBAAkB,GAAG,KAAK,KAAK,CAAC;AAC1F,QAAI,EAAE,WAAW,EAAE,cAAc,IAAI;AACnC,YAAM,KAAK,EAAE,OAAO,KAAK,UAAU,IAAI,SAAS,EAAE,WAAW,OAAO,CAAC,GAAG,EAAE,KAAK,EAAE,CAAC;AAAA,IACpF;AAAA,EACF;AAEA,MAAI,MAAM,WAAW,GAAG;AACtB,WAAO,EAAE,UAAU,SAAS,SAAS,CAAC,EAAE;AAAA,EAC1C;AAEA;AAAA,IACE;AAAA,IACA,GAAG,MAAM,MAAM,IAAI,aAAa,MAAM;AAAA,IACtC;AAAA,EACF;AACA,QAAM,SAAS;AAAA,IACb;AAAA,IACA,GAAG,MAAM,MAAM;AAAA,EACjB;AAEA,QAAM,YAAY,MAAM,eAAe,MAAM,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAClE,QAAM,UAAgC,CAAC;AACvC,QAAM,eAAe,oBAAI,IAAgD;AAEzE,QAAM,QAAQ,CAAC,MAAM,MAAM;AACzB,UAAM,IAAI,UAAU,SAAS,CAAC;AAC9B,QAAI,EAAG,cAAa,IAAI,KAAK,OAAO,CAAC;AACrC,YAAQ,KAAK;AAAA,MACX,UAAU,KAAK;AAAA,MACf,cAAc,KAAK;AAAA,MACnB,OAAO,KAAK;AAAA,MACZ,mBAAmB,GAAG,QAAQ,UAAU;AAAA,IAC1C,CAAC;AAAA,EACH,CAAC;AAED,MAAI,UAAU,OAAO;AACnB;AAAA,MACE;AAAA,MACA,kEAAkE,UAAU,MAAM,OAAO;AAAA,MACzF;AAAA,IACF;AACA,UAAM,SAAS;AAAA,MACb;AAAA,MACA,8BAA8B,UAAU,MAAM,OAAO;AAAA,IACvD;AACA,WAAO;AAAA,MACL,UAAU;AAAA,MACV;AAAA,MACA,YAAY,UAAU;AAAA,IACxB;AAAA,EACF;AAEA,QAAM,iBAAiB,QAAQ,SAAS,IAAI,CAAC,GAAG,QAAQ;AACtD,UAAM,IAAI,aAAa,IAAI,GAAG;AAC9B,QAAI,KAAK,EAAE,QAAQ,SAAS,GAAG;AAC7B,aAAO,EAAE,GAAG,GAAG,OAAO,EAAE,MAAM;AAAA,IAChC;AACA,WAAO;AAAA,EACT,CAAC;AAED,SAAO;AAAA,IACL,UAAU,EAAE,GAAG,SAAS,UAAU,eAAe;AAAA,IACjD;AAAA,EACF;AACF;AAEA,SAAS,qBACP,UACA,OACA,eAA6C,CAAC,GAC9B;AAChB,MAAI,UAAU,MAAO,QAAO;AAC5B,QAAM,WAAW,SAAS,SAAS,IAAI,CAAC,QAAQ,UAAU;AACxD,UAAM,cAAc,aAAa,KAAK,MAAM,UAAU,WAAW,WAAW;AAC5E,WAAO;AAAA,MACL,GAAG;AAAA,MACH,SAAS,OAAO,QAAQ,OAAO,CAAC,MAAM;AACpC,YAAI;AACJ,YAAI;AAAE,iBAAO,IAAI,IAAI,EAAE,IAAI,EAAE;AAAA,QAAU,QAAQ;AAAE,iBAAO;AAAA,QAAM;AAC9D,YAAI,gBAAgB,UAAU;AAC5B,iBAAO,YAAY,KAAK,IAAI,KAAK,sBAAsB,KAAK,EAAE,IAAI;AAAA,QACpE;AAEA,YAAI,CAAC,YAAY,KAAK,IAAI,EAAG,QAAO;AACpC,eAAO,sBAAsB,KAAK,EAAE,IAAI;AAAA,MAC1C,CAAC;AAAA,IACH;AAAA,EACF,CAAC;AACD,SAAO,EAAE,GAAG,UAAU,UAAU,SAAS;AAC3C;AAEA,SAAS,eAAe,UAEtB;AACA,QAAM,cAAc,iBAAiB,SAAS,UAAU,CAAC;AACzD,SAAO,EAAE,YAAY;AACvB;AAIA,SAAS,eACP,SACA,aACA,UACA,UAAmB,OACX;AACR,SAAO;AAAA,IACL,YAAY;AAAA,IAAY;AAAA,IAAS;AAAA,IACjC,YAAY;AAAA,IACZ,YAAY;AAAA,IAAoB,YAAY;AAAA,IAC5C;AAAA,EACF;AACF;AAEA,SAAS,oBACP,aACA,UACA,cACQ;AACR,QAAM,gBAAgB,SAAS,OAAO,CAAC,WAAW,OAAO,QAAQ
,UAAU,CAAC,EAAE;AAC9E,QAAM,WAAW,SACd,OAAO,CAAC,WAAW,OAAO,QAAQ,UAAU,CAAC,EAC7C,IAAI,CAAC,WAAW,IAAI,OAAO,KAAK,GAAG;AACtC,QAAM,iBAAiB,YAAY,WAAW,OAAO,CAAC,QAAQ,IAAI,WAAW,EAAE;AAE/E,QAAM,QAAQ;AAAA,IACZ;AAAA,IACA,eAAe,aAAa,IAAI,YAAY;AAAA,IAC5C,qBAAqB,cAAc;AAAA,EACrC;AAEA,MAAI,SAAS,SAAS,GAAG;AACvB,UAAM,KAAK,gBAAgB,SAAS,KAAK,IAAI,CAAC,EAAE;AAAA,EAClD;AAEA,SAAO,MAAM,KAAK,IAAI;AACxB;AAEO,SAAS,+BACd,eACQ;AACR,MAAI,CAAC,iBAAiB,cAAc,WAAW,GAAG;AAChD,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,CAAC,mCAAmC,EAAE;AAEpD,aAAW,QAAQ,eAAe;AAChC,UAAM,QAAQ,mBAAmB,KAAK,SAAS,EAAE;AACjD,QAAI,CAAC,MAAO;AACZ,UAAM,YAAY,mBAAmB,KAAK,aAAa,EAAE;AACzD,UAAM,SAAS,OAAO,KAAK,WAAW,WAClC,kBAAkB,KAAK,MAAM,QAC7B,KAAK,kBACH,MAAM,mBAAmB,KAAK,eAAe,CAAC,OAC9C;AACN,UAAM;AAAA,MAAK,YACP,KAAK,KAAK,WAAM,SAAS,GAAG,MAAM,KAClC,KAAK,KAAK,GAAG,MAAM;AAAA,IACvB;AAAA,EACF;AAEA,SAAO,MAAM,WAAW,IAAI,KAAK,MAAM,KAAK,IAAI;AAClD;AAEO,SAAS,0BACd,UACA,gBACA,eACA,UAAwC,CAAC,GACjC;AACR,QAAM,iBAAiB,QAAQ,kBAAkB;AACjD,QAAM,WAAW,CAAC,QAAQ;AAC1B,MAAI,kBAAkB,gBAAgB;AACpC,aAAS,KAAK,IAAI,OAAO,cAAc;AAAA,EACzC;AACA,QAAM,YAAY,+BAA+B,aAAa;AAC9D,MAAI,WAAW;AACb,aAAS,KAAK,IAAI,SAAS;AAAA,EAC7B;AACA,SAAO,SAAS,KAAK,IAAI;AAC3B;AAYA,MAAM,iBAAiB;AACvB,MAAM,iBAAiB;AAchB,SAAS,sBACd,OACA,aACA,OAAuC,CAAC,GAChC;AACR,QAAM,MAAM,KAAK,OAAO;AACxB,QAAM,MAAM,KAAK,OAAO;AAExB,QAAM,QAA8F,CAAC;AAErG,aAAW,aAAa,MAAM,MAAM;AAClC,QAAI,MAAM,UAAU,IAAK;AACzB,UAAM,KAAK,EAAE,WAAW,MAAM,kBAAkB,CAAC;AAAA,EACnD;AAEA,MAAI,MAAM,SAAS,KAAK;AACtB,UAAM,SAAS,KAAK,IAAI,KAAK,GAAG;AAChC,eAAW,aAAa,MAAM,OAAO;AACnC,UAAI,MAAM,UAAU,OAAQ;AAC5B,YAAM,KAAK,EAAE,WAAW,MAAM,iBAAiB,CAAC;AAAA,IAClD;AAAA,EACF;AAEA,MAAI,MAAM,WAAW,EAAG,QAAO;AAE/B,QAAM,QAAkB,CAAC;AACzB,QAAM,KAAK,uDAAkD;AAC7D,QAAM,QAAQ,CAAC,MAAM,MAAM;AACzB,UAAM,QAAQ,YAAY,IAAI,KAAK,UAAU,IAAI;AACjD,UAAM,SAAS,OAAO,UAAU,MAAM,OAAO,KAAK,EAAE,SAAS,IAAI,MAAM,SAAS;AAChF,QAAI;AACJ,QAAI;AACF,eAAS,IAAI,IAAI,KAAK,UAAU,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,IACpE,QAAQ;AACN,eAAS,KAAK,UAAU;AAAA,IAC1B;AACA,UAAM;AAAA,MACJ,GAAG,IAAI,CAAC,QAAQ,KAAK,UAAU,KAAK,KAAK,KAAK,UAAU,GAAG,cAAS,MAAM,WAAM,MAAM,MAAM,KAAK,IAAI,UAAU,KAAK,UAAU,IAAI;AAAA,IACpI;AAAA,EACF,CAAC;AACD,SAAO,MAAM,KAAK,IAAI;AACxB;AAIA,SAAS,sBACP,gBACA,aACA,SACA,UACA,cACA,UAAmB,OACX;AACR,QAAM,aAAa,YAAY;AAG/B,QAAM,cAAc,IAAI,IAAI,eAAe,QAAQ,IAAI,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AAE1E,QAAM,QAAQ;AAAA,IACZ,MAAM,CAAC;AAAA,IACP,OAAO,CAAC;AAAA,IACR,OAAO,CAAC;AAAA,EACV;AAEA,aAAW,OAAO,YAAY;AAC5B,UAAM,QAAQ,YAAY,IAAI,IAAI,IAAI;AACtC,UAAM,OAAO,OAAO;AACpB,QAAI,SAAS,mBAAmB;AAC9B,YAAM,KAAK,KAAK,GAAG;AAAA,IACrB,WAAW,SAAS,kBAAkB;AACpC,YAAM,MAAM,KAAK,GAAG;AAAA,IACtB,OAAO;AACL,YAAM,MAAM,KAAK,GAAG;AAAA,IACtB;AAAA,EACF;AAEA,QAAM,QAAkB,CAAC;AAGzB,QAAM,KAAK,MAAM,eAAe,KAAK,EAAE;AACvC,QAAM,KAAK,kBAAkB,OAAO,EAAE;AACtC,QAAM,KAAK,KAAK,YAAY,mBAAc,WAAW,MAAM,gBAAW,MAAM,KAAK,MAAM,qBAAqB,MAAM,MAAM,MAAM,oBAAoB;AAClJ,MAAI,eAAe,YAAY;AAC7B,UAAM,aAAa,eAAe,oBAAoB,WAAM,eAAe,iBAAiB,KAAK;AACjG,UAAM,KAAK,mBAAmB,eAAe,UAAU,KAAK,UAAU,EAAE;AAAA,EAC1E;AACA,QAAM,KAAK,EAAE;AAIb,QAAM,YAAY;AAAA,IAChB,EAAE,MAAM,MAAM,MAAM,OAAO,MAAM,MAAM;AAAA,IACvC;AAAA,EACF;AACA,MAAI,WAAW;AACb,UAAM,KAAK,SAAS;AACpB,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,QAAM,KAAK,gBAAgB,eAAe,SAAS,EAAE;AACrD,QAAM,KAAK,EAAE;AAGb,QAAM,gBAAgB,CAAC,QAA2C;AAChE,UAAM,QAAQ,YAAY,IAAI,IAAI,IAAI;AACtC,UAAM,cAAc,KAAK,MAAM,IAAI,gBAAgB,GAAG;AACtD,UAAM,SAAS,GAAG,IAAI,SAAS,IAAI,YAAY,KAAK,WAAW;AAC/D,UAAM,aAAa,OAAO,cAAc,KAAK,MAAM,WAAW,OAAO;AACrE,UAAM,SAAS,OAAO,SAAS,MAAM,OAAO,QAAQ,OAAO,KAAK,IAAI;AACpE,WAAO,KAAK,IAAI,IAAI,OAAO,IAAI,KAAK,KAAK,IAAI,GAAG,OAAO,UAAU,MAAM,MAAM,MAAM,MAAM;AAAA,EAC3F;AAGA,MAAI,MAAM,KAAK,SAAS,GAAG;AACzB,UAAM,KAAK,wBAAwB,MAAM,KAAK,MAAM,GAAG;
AACvD,UAAM,KAAK,sCAAsC;AACjD,UAAM,KAAK,sCAAsC;AACjD,eAAW,OAAO,MAAM,KAAM,OAAM,KAAK,cAAc,GAAG,CAAC;AAC3D,UAAM,KAAK,EAAE;AAAA,EACf;AAGA,MAAI,MAAM,MAAM,SAAS,GAAG;AAC1B,UAAM,KAAK,uBAAuB,MAAM,MAAM,MAAM,GAAG;AACvD,UAAM,KAAK,sCAAsC;AACjD,UAAM,KAAK,sCAAsC;AACjD,eAAW,OAAO,MAAM,MAAO,OAAM,KAAK,cAAc,GAAG,CAAC;AAC5D,UAAM,KAAK,EAAE;AAAA,EACf;AAGA,MAAI,MAAM,MAAM,SAAS,GAAG;AAC1B,UAAM,KAAK,sBAAsB,MAAM,MAAM,MAAM,GAAG;AACtD,UAAM,KAAK,wCAAwC;AACnD,UAAM,KAAK,wCAAwC;AACnD,eAAW,OAAO,MAAM,OAAO;AAC7B,YAAM,QAAQ,YAAY,IAAI,IAAI,IAAI;AACtC,YAAM,YAAY,IAAI,QAAQ,IAAI,CAAC,MAAM,IAAI,CAAC,GAAG,EAAE,KAAK,IAAI;AAC5D,YAAM,aAAa,OAAO,cAAc,KAAK,MAAM,WAAW,OAAO;AACrE,UAAI;AACJ,UAAI;AACF,iBAAS,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,MACzD,QAAQ;AACN,iBAAS,IAAI;AAAA,MACf;AACA,YAAM,KAAK,KAAK,IAAI,IAAI,MAAM,MAAM,MAAM,UAAU,MAAM,IAAI,MAAM,QAAQ,CAAC,CAAC,MAAM,SAAS,IAAI;AAAA,IACnG;AACA,UAAM,KAAK,EAAE;AAAA,EACf;AAKA,MAAI,SAAS;AACX,UAAM,KAAK,oBAAoB,aAAa,UAAU,YAAY,CAAC;AAAA,EACrE;AAGA,MAAI,eAAe,QAAQ,eAAe,KAAK,SAAS,GAAG;AACzD,UAAM,KAAK,EAAE;AACb,UAAM,KAAK,SAAS;AACpB,eAAW,OAAO,eAAe,MAAM;AACrC,YAAM,KAAK,QAAQ,IAAI,EAAE,OAAO,IAAI,WAAW,EAAE;AAAA,IACnD;AAAA,EACF;AAEA,QAAM,YAAY,+BAA+B,eAAe,cAAc;AAC9E,MAAI,WAAW;AACb,UAAM,KAAK,EAAE;AACb,UAAM,KAAK,SAAS;AAAA,EACtB;AAEA,SAAO,MAAM,KAAK,IAAI;AACxB;AAIA,SAAS,cACP,aACA,eACA,cACA,UACA,eACA,OACA,UACA,eACA,gBACA,YACA;AACA,QAAM,kBAAkB,SAAS,IAAI,OAAK;AACxC,QAAI;AACJ,UAAM,YAAY,EAAE,QAAQ,CAAC;AAC7B,QAAI,WAAW;AACb,UAAI;AAAE,oBAAY,IAAI,IAAI,UAAU,IAAI,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,MAAG,QAAQ;AAAA,MAAe;AAAA,IACnG;AACA,WAAO,EAAE,OAAO,EAAE,OAAO,cAAc,EAAE,QAAQ,QAAQ,SAAS,UAAU;AAAA,EAC9E,CAAC;AACD,QAAM,kBAAkB,SACrB,OAAO,OAAK,EAAE,QAAQ,UAAU,CAAC,EACjC,IAAI,OAAK,EAAE,KAAK;AACnB,QAAM,oBAAoB,SAAS,OAAO,OAAK,EAAE,QAAQ,SAAS,CAAC,EAAE;AAErE,SAAO;AAAA,IACL,aAAa;AAAA,IACb,YAAY;AAAA,IACZ,QAAQ,KAAK,IAAI,eAAe,mBAAmB,CAAC;AAAA,IACpD,mBAAmB;AAAA,IACnB,gBAAgB;AAAA,IAChB;AAAA,IACA,GAAI,WAAW,EAAE,WAAW,SAAS,IAAI,CAAC;AAAA,IAC1C,kBAAkB;AAAA,IAClB,GAAI,gBAAgB,SAAS,IAAI,EAAE,mBAAmB,gBAAgB,IAAI,CAAC;AAAA,IAC3E,GAAI,iBAAiB,cAAc,SAAS,IAAI,EAAE,gBAAgB,cAAc,IAAI,CAAC;AAAA,IACrF,GAAI,kBAAkB,eAAe,SAAS,IAAI,EAAE,iBAAiB,eAAe,IAAI,CAAC;AAAA,IACzF,GAAI,aACA;AAAA,MACE,aAAa;AAAA,QACX,OAAO;AAAA,QACP,MAAM,WAAW;AAAA,QACjB,SAAS,WAAW;AAAA,QACpB,WAAW,WAAW;AAAA,QACtB,GAAI,OAAO,WAAW,eAAe,WAAW,EAAE,YAAY,WAAW,WAAW,IAAI,CAAC;AAAA,MAC3F;AAAA,IACF,IACA,CAAC;AAAA,EACP;AACF;AAEA,SAAS,uBACP,aACA,eAUC;AACD,SAAO,YAAY,WAAW,IAAI,CAAC,QAAQ;AAGzC,UAAM,SAAS,eAAe,IAAI,IAAI,IAAI;AAC1C,UAAM,YAAY,oBAAoB,IAAI,GAAG;AAC7C,WAAO;AAAA,MACL,MAAM,IAAI;AAAA,MACV,KAAK,IAAI;AAAA,MACT,OAAO,IAAI;AAAA,MACX,SAAS,IAAI;AAAA,MACb,aAAe,UAA+B;AAAA,MAC9C,OAAO,OAAO,IAAI,MAAM,QAAQ,CAAC,CAAC;AAAA,MAClC,SAAS,IAAI;AAAA,MACb,eAAe,IAAI;AAAA,IACrB;AAAA,EACF,CAAC;AACH;AAIA,SAAS,kBAAkB,OAA0C;AACnE,MAAI,OAAO,UAAU,YAAY,UAAU,KAAM,QAAO;AACxD,QAAM,SAAS;AACf,SAAO,OAAO,OAAO,SAAS,YACzB,OAAO,OAAO,YAAY,YAC1B,OAAO,OAAO,cAAc;AACnC;AAEA,SAAS,yBAAyB,OAAiC;AACjE,SAAO,kBAAkB,KAAK,IAAI,QAAQ,cAAc,KAAK;AAC/D;AAEA,SAAS,2BACP,OACA,OACQ;AACR,MAAI,UAAU,WAAW;AACvB,WAAO,gDAAgD,MAAM,OAAO;AAAA,EACtE;AAEA,MAAI,UAAU,eAAe;AAC3B,WAAO,sDAAsD,MAAM,OAAO;AAAA,EAC5E;AAEA,SAAO,MAAM;AACf;AAEA,SAAS,oBACP,OACA,QACA,WACA,OACsC;AACtC,QAAM,kBAAkB,yBAAyB,KAAK;AACtD,QAAM,UAAU,2BAA2B,iBAAiB,KAAK;AACjE,QAAM,gBAAgB,KAAK,IAAI,IAAI;AAEnC,SAAO,SAAS,eAAe,OAAO,IAAI,QAAQ;AAElD,QAAM,eAAe,YAAY;AAAA,IAC/B,MAAM,gBAAgB;AAAA,IACtB;AAAA,IACA,WAAW,gBAAgB;AAAA,IAC3B,UAAU;AAAA,IACV,UAAU,CAAC,wCAAwC;AAAA,IACnD,cAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,EACF,CAAC;AAED,SAAO;AAAA,IACL,GAAG,YAAY;AAAA;AAAA,kBAAuB,eAAe,aAAa,CAAC;AAAA,WAAc,OAAO,QAAQ,MAAM;AAAA,EACxG;AACF;AAIA,eAAsB,g
BACpB,QACA,WAAyB,eACzB,iBAAiC,iBACc;AAC/C,QAAM,YAAY,KAAK,IAAI;AAE3B,MAAI;AACF,UAAM,gBAAgB,mBAAmB,OAAO,SAAS,OAAO,KAAK;AACrE,UAAM,mBAAmB,cAAc,IAAI,CAAC,UAAU,MAAM,KAAK;AACjE,QAAI,OAAO,UAAU,OAAO;AAC1B,aAAO,QAAQ,mBAAmB,OAAO,KAAK,KAAK,OAAO,QAAQ,MAAM,yBAAoB,iBAAiB,MAAM,eAAe,QAAQ;AAAA,IAC5I,OAAO;AACL,aAAO,QAAQ,iBAAiB,OAAO,QAAQ,MAAM,kBAAkB,QAAQ;AAAA,IACjF;AACA,UAAM,SAAS,IAAI,QAAQ,iBAAiB,iBAAiB,MAAM,yBAAyB,OAAO,KAAK,GAAG;AAC3G,UAAM,SAAS,SAAS,IAAI,KAAK,2BAA2B;AAK5D,UAAM,eAAe,iBAAiB,IAAI,CAAC,MAAM;AAC/C,YAAM,IAAI,0BAA0B,CAAC;AACrC,aAAO,EAAE,UAAU,GAAG,YAAY,EAAE,WAAW,OAAO,CAAC,GAAG,EAAE,KAAK,GAAG,SAAS,EAAE,QAAQ;AAAA,IACzF,CAAC;AACD,UAAM,oBAAoB,aAAa,IAAI,CAAC,MAAM,EAAE,UAAU;AAC9D,UAAM,eAAe,cAAc,IAAI,CAAC,UAAU,MAAM,WAAW;AACnE,UAAM,kBAAkB,cAAc,IAAI,CAAC,UAAU,MAAM,eAAe;AAC1E,UAAM,gBAAsC,aACzC,OAAO,CAAC,MAAM,EAAE,OAAO,EACvB,IAAI,CAAC,OAAO,EAAE,UAAU,EAAE,UAAU,WAAW,EAAE,YAAY,OAAO,EAAE,MAAM,EAAE;AAEjF,QAAI,cAAc,SAAS,GAAG;AAC5B;AAAA,QACE;AAAA,QACA,2BAA2B,cAAc,MAAM,IAAI,iBAAiB,MAAM;AAAA,QAC1E;AAAA,MACF;AACA,YAAM,SAAS;AAAA,QACb;AAAA,QACA,cAAc,cAAc,MAAM;AAAA,MACpC;AAAA,IACF;AAKA,UAAM;AAAA,MACJ,UAAU;AAAA,MACV,SAAS;AAAA,MACT;AAAA,MACA;AAAA,IACF,IAAI,MAAM;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA,EAAE,gBAAgB;AAAA,IACpB;AAEA,QAAI,YAAY,OAAO;AACrB,YAAM,SAAS,IAAI,SAAS,2BAA2B,YAAY,MAAM,OAAO,EAAE;AAClF,aAAO,oBAAoB,YAAY,OAAO,QAAQ,WAAW,YAAY;AAAA,IAC/E;AAEA,UAAM,WAAW,qBAAqB,aAAa,OAAO,OAAO,YAAY;AAC7E,UAAM,SAAS,SAAS,IAAI,KAAK,0BAA0B;AAE3D,UAAM,EAAE,YAAY,IAAI,eAAe,QAAQ;AAC/C,UAAM,SAAS;AAAA,MACb;AAAA,MACA,aAAa,YAAY,eAAe,uBAAuB,SAAS,YAAY;AAAA,IACtF;AAGA,UAAM,SAAS,OAAO;AACtB,UAAM,eAAe,mBAAmB;AAExC,QAAI;AACJ,QAAI,gBAAgB;AACpB,QAAI;AAEJ,QAAI,UAAU,CAAC,cAAc;AAE3B,UAAI,CAAC,UAAU,CAAC,cAAc;AAC5B,mBAAW;AACX,eAAO,WAAW,UAAU,QAAQ;AAEpC,cAAM,SAAS,IAAI,WAAW,8EAA8E;AAAA,MAC9G;AACA,UAAI;AACJ,UAAI,UAAU,cAAc;AAC1B,cAAM,eAAe,MAAM;AAAA,UACzB,YAAY;AAAA,UACZ,OAAO;AAAA,UACP,OAAO;AAAA,UACP;AAAA,QACF;AACA,2BAAmB,aAAa;AAAA,MAClC;AACA,iBAAW;AAAA,QACT,eAAe,OAAO,SAAS,aAAa,SAAS,UAAU,OAAO,OAAO;AAAA,QAC7E,oBAAoB,aAAa,SAAS,UAAU,SAAS,YAAY;AAAA,QACzE;AAAA,QACA,EAAE,gBAAgB,OAAO,QAAQ;AAAA,MACnC;AACA,YAAM,SAAS,SAAS,IAAI,KAAK,wBAAwB;AAAA,IAC3D,OAAO;AAEL,YAAM,SAAS,SAAS,IAAI,KAAK,kCAAkC;AACnE,YAAM,iBAAiB,MAAM;AAAA,QAC3B,YAAY;AAAA,QACZ,OAAO;AAAA,QACP,SAAS;AAAA,QACT;AAAA,QACA,OAAO;AAAA,MACT;AAEA,UAAI,eAAe,QAAQ;AACzB,mBAAW;AAAA,UACT,eAAe;AAAA,UAAQ;AAAA,UAAa,OAAO;AAAA,UAAS,SAAS;AAAA,UAAU,SAAS;AAAA,UAAc,OAAO;AAAA,QACvG;AACA,wBAAgB;AAChB,cAAM,SAAS,SAAS,IAAI,KAAK,8BAA8B;AAAA,MACjE,OAAO;AAEL,mBAAW,eAAe,SAAS;AACnC,eAAO,WAAW,+CAA+C,QAAQ,IAAI,QAAQ;AAErF,cAAM,SAAS,IAAI,WAAW,+BAA+B,QAAQ,EAAE;AACvE,mBAAW;AAAA,UACT,eAAe,OAAO,SAAS,aAAa,SAAS,UAAU,OAAO,OAAO;AAAA,UAC7E,oBAAoB,aAAa,SAAS,UAAU,SAAS,YAAY;AAAA,UACzE;AAAA,UACA,EAAE,gBAAgB,OAAO,QAAQ;AAAA,QACnC;AACA,cAAM,SAAS,SAAS,IAAI,KAAK,yCAAyC;AAAA,MAC5E;AAAA,IACF;AAEA,UAAM,gBAAgB,KAAK,IAAI,IAAI;AACnC,UAAM,WAAW;AAAA,MACf;AAAA,MAAa;AAAA,MAAe,SAAS;AAAA,MAAc,SAAS;AAAA,MAAU;AAAA,MAAe,OAAO;AAAA,MAAO;AAAA,MACnG;AAAA,MAAe;AAAA,MAAgB;AAAA,IACjC;AAMA,UAAM,gBAAgB,oBAAI,IAAoB;AAM9C,UAAM,UAAU,uBAAuB,aAAa,aAAa;AAEjE,WAAO,QAAQ,qBAAqB,YAAY,WAAW,MAAM,qBAAqB,aAAa,IAAI,QAAQ;AAC/G,UAAM,SAAS,IAAI,QAAQ,yBAAyB,YAAY,WAAW,MAAM,sBAAsB,aAAa,GAAG;AAEvH,UAAM,SAAS;AAAA;AAAA,GAAW,eAAe,aAAa,CAAC,MAAM,YAAY,eAAe,eAAe,gBAAgB,sBAAsB,EAAE;AAC/I,UAAM,eAAe,WAAW;AAEhC,WAAO,YAAY,cAAc,EAAE,SAAS,SAAS,CAAC;AAAA,EACxD,SAAS,OAAO;AACd,WAAO,oBAAoB,OAAO,QAAQ,SAAS;AAAA,EACrD;AACF;AAEO,SAAS,sBAAsB,QAAyB;AAC7D,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,I
ACF;AAAA,IACA,OAAO,MAAM,QAAQ;AACnB,UAAI,CAAC,gBAAgB,EAAE,QAAQ;AAC7B,eAAO,eAAe,YAAY,qBAAqB,QAAQ,CAAC,CAAC;AAAA,MACnE;AAEA,YAAM,WAAW,mBAAmB,KAAK,YAAY;AACrD,YAAM,SAAS,MAAM,gBAAgB,MAAM,QAAQ;AAEnD,YAAM,SAAS,SAAS,KAAK,KAAK,OAAO,UAAU,kBAAkB,iBAAiB;AACtF,aAAO,eAAe,MAAM;AAAA,IAC9B;AAAA,EACF;AACF;",
4
+ "sourcesContent": ["/**\n * Web Search Tool Handler\n * NEVER throws - always returns structured response for graceful degradation\n */\n\nimport type { MCPServer } from 'mcp-use/server';\n\nimport { getCapabilities, getMissingEnvMessage } from '../config/index.js';\nimport {\n webSearchOutputSchema,\n webSearchParamsSchema,\n type WebSearchParams,\n type WebSearchOutput,\n} from '../schemas/web-search.js';\nimport { SearchClient, type MultipleSearchResponse } from '../clients/search.js';\nimport {\n aggregateAndRank,\n generateUnifiedOutput,\n} from '../utils/url-aggregator.js';\nimport {\n createLLMProcessor,\n classifySearchResults,\n suggestRefineQueriesForRawMode,\n type ClassificationEntry,\n type ClassificationResult,\n type RefineQuerySuggestion,\n} from '../services/llm-processor.js';\nimport { classifyError, type StructuredError } from '../utils/errors.js';\nimport { classifySourceByUrl } from '../utils/source-type.js';\nimport {\n mcpLog,\n formatError,\n formatDuration,\n} from './utils.js';\nimport {\n createToolReporter,\n NOOP_REPORTER,\n toolFailure,\n toolSuccess,\n toToolResponse,\n type ToolExecutionResult,\n type ToolReporter,\n} from './mcp-helpers.js';\nimport { sanitizeSuggestion } from '../utils/sanitize.js';\nimport {\n normalizeQueryForDispatch,\n relaxQueryForRetry,\n} from '../utils/query-relax.js';\n\n// --- Internal types ---\n\ninterface SearchAggregation {\n readonly rankedUrls: ReturnType<typeof aggregateAndRank>['rankedUrls'];\n readonly totalUniqueUrls: number;\n readonly frequencyThreshold: number;\n readonly thresholdNote?: string;\n}\n\nexport type SearchResponse = MultipleSearchResponse;\nexport type SearchExecutor = (queries: string[]) => Promise<SearchResponse>;\n\ntype SearchFailurePhase = 'initial' | 'relax-retry';\ntype SearchResultScope = 'web' | 'reddit';\n\n// --- Helpers ---\n\n/** Reddit post permalink: /r/{sub}/comments/{id}/ \u2014 drops subreddit\n * homepages, /rising, /new, /top, etc. so only post URLs reach the agent.\n * See mcp-revisions/tool-surface/02-extend-web-search-with-reddit-scope.md. */\nconst REDDIT_POST_PERMALINK = /\\/r\\/[^/]+\\/comments\\/[a-z0-9]+\\//i;\nconst REDDIT_HOST = /(?:^|\\.)reddit\\.com$/i;\n\ninterface ScopedQuery {\n query: string;\n resultScope: SearchResultScope;\n dropSiteOnRetry: boolean;\n}\n\nfunction redditScopedQuery(query: string): string {\n return /\\bsite:reddit\\.com\\b/i.test(query) ? query : `${query} site:reddit.com`;\n}\n\nfunction buildScopedQueries(queries: string[], scope: 'web' | 'reddit' | 'both'): ScopedQuery[] {\n if (scope === 'web') {\n return queries.map((query) => ({ query, resultScope: 'web', dropSiteOnRetry: true }));\n }\n\n const reddited = queries.map((q) =>\n ({ query: redditScopedQuery(q), resultScope: 'reddit' as const, dropSiteOnRetry: false }),\n );\n\n if (scope === 'reddit') return reddited;\n\n return [\n ...queries.map((query) => ({ query, resultScope: 'web' as const, dropSiteOnRetry: true })),\n ...reddited,\n ];\n}\n\nasync function executeSearches(queries: string[]): Promise<SearchResponse> {\n const client = new SearchClient();\n return client.searchMultiple(queries);\n}\n\ninterface QueryRewriteRecord {\n original: string;\n rewritten: string;\n rules: string[];\n}\n\ninterface RetriedQueryRecord {\n original: string;\n retried_with: string;\n rules: string[];\n recovered_results: number;\n}\n\n/** Run Serper, then for each query that returned 0 results build a relaxed\n * retry (Phase B) and reissue them in a single second batch. 
Replace the\n * empty slot with the retry's results when the retry recovered \u22651 hit, but\n * keep the original query string in the slot so downstream aggregation and\n * follow-up rendering stay consistent. */\nasync function executeWithRelaxRetry(\n dispatched: string[],\n reporter: ToolReporter,\n searchExecutor: SearchExecutor = executeSearches,\n retryOptions: { readonly dropSiteOnRetry?: readonly boolean[] } = {},\n): Promise<{\n response: SearchResponse;\n retried: RetriedQueryRecord[];\n failurePhase?: SearchFailurePhase;\n retryError?: StructuredError;\n}> {\n const initial = await searchExecutor(dispatched);\n\n if (initial.error) {\n return { response: initial, retried: [], failurePhase: 'initial' };\n }\n\n const emptyIndices = initial.searches\n .map((s, i) => (s.results.length === 0 ? i : -1))\n .filter((i) => i !== -1);\n\n if (emptyIndices.length === 0) {\n return { response: initial, retried: [] };\n }\n\n interface Plan { index: number; original: string; relaxed: string; rules: string[] }\n const plans: Plan[] = [];\n for (const idx of emptyIndices) {\n const dq = dispatched[idx];\n if (typeof dq !== 'string') continue;\n const r = relaxQueryForRetry(dq, { dropSite: retryOptions.dropSiteOnRetry?.[idx] ?? true });\n if (r.changed && r.rewritten !== dq) {\n plans.push({ index: idx, original: dq, relaxed: r.rewritten, rules: [...r.rules] });\n }\n }\n\n if (plans.length === 0) {\n return { response: initial, retried: [] };\n }\n\n mcpLog(\n 'info',\n `${plans.length}/${emptyIndices.length} empty-result queries eligible for relaxation retry`,\n 'search',\n );\n await reporter.log(\n 'info',\n `${plans.length} queries returned 0 results; retrying with relaxation`,\n );\n\n const retryResp = await searchExecutor(plans.map((p) => p.relaxed));\n const retried: RetriedQueryRecord[] = [];\n const retryByIndex = new Map<number, SearchResponse['searches'][number]>();\n\n plans.forEach((plan, i) => {\n const r = retryResp.searches[i];\n if (r) retryByIndex.set(plan.index, r);\n retried.push({\n original: plan.original,\n retried_with: plan.relaxed,\n rules: plan.rules,\n recovered_results: r?.results.length ?? 0,\n });\n });\n\n if (retryResp.error) {\n mcpLog(\n 'warning',\n `Relaxed retry batch failed; preserving initial search results: ${retryResp.error.message}`,\n 'search',\n );\n await reporter.log(\n 'warning',\n `search_relax_retry_failed: ${retryResp.error.message}`,\n );\n return {\n response: initial,\n retried,\n retryError: retryResp.error,\n };\n }\n\n const mergedSearches = initial.searches.map((s, idx) => {\n const r = retryByIndex.get(idx);\n if (r && r.results.length > 0) {\n return { ...r, query: s.query };\n }\n return s;\n });\n\n return {\n response: { ...initial, searches: mergedSearches },\n retried,\n };\n}\n\nfunction filterScopedSearches(\n response: SearchResponse,\n scope: 'web' | 'reddit' | 'both',\n resultScopes: readonly SearchResultScope[] = [],\n): SearchResponse {\n if (scope === 'web') return response;\n const filtered = response.searches.map((search, index) => {\n const resultScope = resultScopes[index] ?? (scope === 'reddit' ? 
'reddit' : 'web');\n return {\n ...search,\n results: search.results.filter((r) => {\n let host: string;\n try { host = new URL(r.link).hostname; } catch { return true; }\n if (resultScope === 'reddit') {\n return REDDIT_HOST.test(host) && REDDIT_POST_PERMALINK.test(r.link);\n }\n // Web-side results pass through; reddit URLs still must be post permalinks.\n if (!REDDIT_HOST.test(host)) return true;\n return REDDIT_POST_PERMALINK.test(r.link);\n }),\n };\n });\n return { ...response, searches: filtered };\n}\n\nfunction processResults(response: SearchResponse): {\n aggregation: SearchAggregation;\n} {\n const aggregation = aggregateAndRank(response.searches, 5);\n return { aggregation };\n}\n\n// --- Raw output (traditional unified ranked list) ---\n\nfunction buildRawOutput(\n queries: string[],\n aggregation: SearchAggregation,\n searches: SearchResponse['searches'],\n verbose: boolean = false,\n): string {\n return generateUnifiedOutput(\n aggregation.rankedUrls, queries, searches,\n aggregation.totalUniqueUrls,\n aggregation.frequencyThreshold, aggregation.thresholdNote,\n verbose,\n );\n}\n\nfunction buildSignalsSection(\n aggregation: SearchAggregation,\n searches: SearchResponse['searches'],\n totalQueries: number,\n): string {\n const coverageCount = searches.filter((search) => search.results.length >= 3).length;\n const lowYield = searches\n .filter((search) => search.results.length <= 1)\n .map((search) => `\"${search.query}\"`);\n const consensusCount = aggregation.rankedUrls.filter((url) => url.isConsensus).length;\n\n const lines = [\n '**Signals**',\n `- Coverage: ${coverageCount}/${totalQueries} queries returned \u22653 results`,\n `- Consensus URLs: ${consensusCount}`,\n ];\n\n if (lowYield.length > 0) {\n lines.push(`- Low-yield: ${lowYield.join(', ')}`);\n }\n\n return lines.join('\\n');\n}\n\nexport function buildSuggestedFollowUpsSection(\n refineQueries: Array<{ query: string; rationale?: string; gap_id?: number; gap_description?: string }> | undefined,\n): string {\n if (!refineQueries || refineQueries.length === 0) {\n return '';\n }\n\n const lines = ['## Suggested follow-up searches', ''];\n\n for (const item of refineQueries) {\n const query = sanitizeSuggestion(item.query ?? '');\n if (!query) continue;\n const rationale = sanitizeSuggestion(item.rationale ?? '');\n const gapTag = typeof item.gap_id === 'number'\n ? ` _(closes gap [${item.gap_id}])_`\n : item.gap_description\n ? ` _(${sanitizeSuggestion(item.gap_description)})_`\n : '';\n lines.push(rationale\n ? `- ${query} \u2014 ${rationale}${gapTag}`\n : `- ${query}${gapTag}`,\n );\n }\n\n return lines.length === 2 ? '' : lines.join('\\n');\n}\n\nexport function appendSignalsAndFollowUps(\n markdown: string,\n signalsSection: string,\n refineQueries: RefineQuerySuggestion[] | undefined,\n options: { includeSignals?: boolean } = {},\n): string {\n const includeSignals = options.includeSignals ?? false;\n const sections = [markdown];\n if (includeSignals && signalsSection) {\n sections.push('', '---', signalsSection);\n }\n const followUps = buildSuggestedFollowUpsSection(refineQueries);\n if (followUps) {\n sections.push('', followUps);\n }\n return sections.join('\\n');\n}\n\n// --- \"Start here\" section ---\n//\n// Surfaces the best 3-5 URLs at the top of the classified response so an agent\n// skimming the first screen sees them before tier tables. 
Deterministic: uses\n// existing `tier` + `rank` + `reason` from the classifier, no extra LLM call.\n//\n// Algorithm: take HIGHLY_RELEVANT by rank up to MAX_START_HERE; if fewer than\n// MIN_START_HERE, pad from top MAYBE_RELEVANT; skip entirely if no entries\n// above OTHER.\n\nconst MIN_START_HERE = 3;\nconst MAX_START_HERE = 5;\n\n/** Minimal structural shape \u2014 avoids coupling to private `RankedUrl` type. */\ninterface StartHereCandidate {\n readonly rank: number;\n readonly url: string;\n readonly title: string;\n}\n\ninterface StartHereTiers {\n readonly high: readonly StartHereCandidate[];\n readonly maybe: readonly StartHereCandidate[];\n}\n\nexport function buildStartHereSection(\n tiers: StartHereTiers,\n entryByRank: Map<number, ClassificationEntry>,\n opts: { min?: number; max?: number } = {},\n): string {\n const min = opts.min ?? MIN_START_HERE;\n const max = opts.max ?? MAX_START_HERE;\n\n const picks: Array<{ candidate: StartHereCandidate; tier: 'HIGHLY_RELEVANT' | 'MAYBE_RELEVANT' }> = [];\n\n for (const candidate of tiers.high) {\n if (picks.length >= max) break;\n picks.push({ candidate, tier: 'HIGHLY_RELEVANT' });\n }\n\n if (picks.length < min) {\n const target = Math.min(min, max);\n for (const candidate of tiers.maybe) {\n if (picks.length >= target) break;\n picks.push({ candidate, tier: 'MAYBE_RELEVANT' });\n }\n }\n\n if (picks.length === 0) return '';\n\n const lines: string[] = [];\n lines.push('## Start here \u2014 best candidates for your extract');\n picks.forEach((pick, i) => {\n const entry = entryByRank.get(pick.candidate.rank);\n const reason = entry?.reason && entry.reason.trim().length > 0 ? entry.reason : '\u2014';\n let domain: string;\n try {\n domain = new URL(pick.candidate.url).hostname.replace(/^www\\./, '');\n } catch {\n domain = pick.candidate.url;\n }\n lines.push(\n `${i + 1}. **[${pick.candidate.title}](${pick.candidate.url})** \u2014 ${domain} \u2014 ${reason} *(${pick.tier}, rank ${pick.candidate.rank})*`,\n );\n });\n return lines.join('\\n');\n}\n\n// --- Classified output (3-tier LLM-classified table) ---\n\nfunction buildClassifiedOutput(\n classification: ClassificationResult,\n aggregation: SearchAggregation,\n extract: string,\n searches: SearchResponse['searches'],\n totalQueries: number,\n verbose: boolean = false,\n): string {\n const rankedUrls = aggregation.rankedUrls;\n\n // Build tier \u2192 entries mapping (keep url data alongside classifier metadata)\n const entryByRank = new Map(classification.results.map((r) => [r.rank, r]));\n\n const tiers = {\n high: [] as typeof rankedUrls,\n maybe: [] as typeof rankedUrls,\n other: [] as typeof rankedUrls,\n };\n\n for (const url of rankedUrls) {\n const entry = entryByRank.get(url.rank);\n const tier = entry?.tier;\n if (tier === 'HIGHLY_RELEVANT') {\n tiers.high.push(url);\n } else if (tier === 'MAYBE_RELEVANT') {\n tiers.maybe.push(url);\n } else {\n tiers.other.push(url);\n }\n }\n\n const lines: string[] = [];\n\n // Header with generated title, synthesis, and confidence\n lines.push(`## ${classification.title}`);\n lines.push(`> Looking for: ${extract}`);\n lines.push(`> ${totalQueries} queries \u2192 ${rankedUrls.length} URLs \u2192 ${tiers.high.length} highly relevant, ${tiers.maybe.length} possibly relevant`);\n if (classification.confidence) {\n const confReason = classification.confidence_reason ? 
` \u2014 ${classification.confidence_reason}` : '';\n lines.push(`> Confidence: \\`${classification.confidence}\\`${confReason}`);\n }\n lines.push('');\n\n // \"Start here\" block: surface the top 3-5 URLs above the synthesis so an\n // agent skimming the first screen sees scrape candidates before prose.\n const startHere = buildStartHereSection(\n { high: tiers.high, maybe: tiers.maybe },\n entryByRank,\n );\n if (startHere) {\n lines.push(startHere);\n lines.push('');\n }\n\n lines.push(`**Summary:** ${classification.synthesis}`);\n lines.push('');\n\n // Helper: render one row with optional source_type + reason\n const renderRichRow = (url: typeof rankedUrls[number]): string => {\n const entry = entryByRank.get(url.rank);\n const coveragePct = Math.round(url.coverageRatio * 100);\n const seenIn = `${url.frequency}/${totalQueries} (${coveragePct}%)`;\n const sourceType = entry?.source_type ? `\\`${entry.source_type}\\`` : '\u2014';\n const reason = entry?.reason ? entry.reason.replace(/\\|/g, '\\\\|') : '\u2014';\n return `| ${url.rank} | [${url.title}](${url.url}) | ${sourceType} | ${seenIn} | ${reason} |`;\n };\n\n // Highly Relevant tier\n if (tiers.high.length > 0) {\n lines.push(`### Highly Relevant (${tiers.high.length})`);\n lines.push('| # | URL | Source | Seen in | Why |');\n lines.push('|---|-----|--------|---------|-----|');\n for (const url of tiers.high) lines.push(renderRichRow(url));\n lines.push('');\n }\n\n // Maybe Relevant tier\n if (tiers.maybe.length > 0) {\n lines.push(`### Maybe Relevant (${tiers.maybe.length})`);\n lines.push('| # | URL | Source | Seen in | Why |');\n lines.push('|---|-----|--------|---------|-----|');\n for (const url of tiers.maybe) lines.push(renderRichRow(url));\n lines.push('');\n }\n\n // Other tier \u2014 with query attribution\n if (tiers.other.length > 0) {\n lines.push(`### Other Results (${tiers.other.length})`);\n lines.push('| # | URL | Source | Score | Queries |');\n lines.push('|---|-----|--------|-------|---------|');\n for (const url of tiers.other) {\n const entry = entryByRank.get(url.rank);\n const queryList = url.queries.map((q) => `\"${q}\"`).join(', ');\n const sourceType = entry?.source_type ? 
`\\`${entry.source_type}\\`` : '\u2014';\n let domain: string;\n try {\n domain = new URL(url.url).hostname.replace(/^www\\./, '');\n } catch {\n domain = url.url;\n }\n lines.push(`| ${url.rank} | ${domain} | ${sourceType} | ${url.score.toFixed(1)} | ${queryList} |`);\n }\n lines.push('');\n }\n\n // Signals block is gated behind verbose \u2014 it duplicates info already\n // present in the per-row metadata for callers who care.\n // See: docs/code-review/context/05-output-formatting-patterns.md.\n if (verbose) {\n lines.push(buildSignalsSection(aggregation, searches, totalQueries));\n }\n\n // Gaps section \u2014 what the current results don't answer\n if (classification.gaps && classification.gaps.length > 0) {\n lines.push('');\n lines.push('## Gaps');\n for (const gap of classification.gaps) {\n lines.push(`- **[${gap.id}]** ${gap.description}`);\n }\n }\n\n const followUps = buildSuggestedFollowUpsSection(classification.refine_queries);\n if (followUps) {\n lines.push('');\n lines.push(followUps);\n }\n\n return lines.join('\\n');\n}\n\n// --- Metadata builder ---\n\nfunction buildMetadata(\n aggregation: SearchAggregation,\n executionTime: number,\n totalQueries: number,\n searches: SearchResponse['searches'],\n llmClassified: boolean,\n scope: 'web' | 'reddit' | 'both',\n llmError?: string,\n queryRewrites?: QueryRewriteRecord[],\n retriedQueries?: RetriedQueryRecord[],\n retryError?: StructuredError,\n) {\n const coverageSummary = searches.map(s => {\n let topDomain: string | undefined;\n const topResult = s.results[0];\n if (topResult) {\n try { topDomain = new URL(topResult.link).hostname.replace(/^www\\./, ''); } catch { /* ignore */ }\n }\n return { query: s.query, result_count: s.results.length, top_url: topDomain };\n });\n const lowYieldQueries = searches\n .filter(s => s.results.length <= 1)\n .map(s => s.query);\n const successfulQueries = searches.filter(s => s.results.length > 0).length;\n\n return {\n total_items: totalQueries,\n successful: successfulQueries,\n failed: Math.max(totalQueries - successfulQueries, 0),\n execution_time_ms: executionTime,\n llm_classified: llmClassified,\n scope,\n ...(llmError ? { llm_error: llmError } : {}),\n coverage_summary: coverageSummary,\n ...(lowYieldQueries.length > 0 ? { low_yield_queries: lowYieldQueries } : {}),\n ...(queryRewrites && queryRewrites.length > 0 ? { query_rewrites: queryRewrites } : {}),\n ...(retriedQueries && retriedQueries.length > 0 ? { retried_queries: retriedQueries } : {}),\n ...(retryError\n ? {\n retry_error: {\n phase: 'relax-retry' as const,\n code: retryError.code,\n message: retryError.message,\n retryable: retryError.retryable,\n ...(typeof retryError.statusCode === 'number' ? { statusCode: retryError.statusCode } : {}),\n },\n }\n : {}),\n };\n}\n\nfunction buildStructuredResults(\n aggregation: SearchAggregation,\n llmTagsByRank?: Map<number, string>,\n): Array<{\n rank: number;\n url: string;\n title: string;\n snippet: string;\n source_type: 'reddit' | 'github' | 'docs' | 'blog' | 'paper' | 'qa' | 'cve' | 'news' | 'video' | 'web';\n score: number;\n seen_in: number;\n best_position: number;\n}> {\n return aggregation.rankedUrls.map((row) => {\n // LLM tag wins when present; heuristic is the always-on fallback. 
See:\n // mcp-revisions/output-shaping/06-source-type-tagging-without-llm.md.\n const llmTag = llmTagsByRank?.get(row.rank);\n const heuristic = classifySourceByUrl(row.url);\n return {\n rank: row.rank,\n url: row.url,\n title: row.title,\n snippet: row.snippet,\n source_type: ((llmTag as typeof heuristic) ?? heuristic),\n score: Number(row.score.toFixed(2)),\n seen_in: row.frequency,\n best_position: row.bestPosition,\n };\n });\n}\n\n// --- Error builder ---\n\nfunction isStructuredError(error: unknown): error is StructuredError {\n if (typeof error !== 'object' || error === null) return false;\n const record = error as Record<string, unknown>;\n return typeof record.code === 'string'\n && typeof record.message === 'string'\n && typeof record.retryable === 'boolean';\n}\n\nfunction normalizeStructuredError(error: unknown): StructuredError {\n return isStructuredError(error) ? error : classifyError(error);\n}\n\nfunction formatSearchFailureMessage(\n error: StructuredError,\n phase?: SearchFailurePhase,\n): string {\n if (phase === 'initial') {\n return `Search provider failed during initial batch: ${error.message}`;\n }\n\n if (phase === 'relax-retry') {\n return `Search provider failed during relaxed retry batch: ${error.message}`;\n }\n\n return error.message;\n}\n\nfunction buildWebSearchError(\n error: unknown,\n params: WebSearchParams,\n startTime: number,\n phase?: SearchFailurePhase,\n): ToolExecutionResult<WebSearchOutput> {\n const structuredError = normalizeStructuredError(error);\n const message = formatSearchFailureMessage(structuredError, phase);\n const executionTime = Date.now() - startTime;\n\n mcpLog('error', `web-search: ${message}`, 'search');\n\n const errorContent = formatError({\n code: structuredError.code,\n message,\n retryable: structuredError.retryable,\n toolName: 'web-search',\n howToFix: ['Verify SERPER_API_KEY is set correctly'],\n alternatives: [\n 'web-search(queries=[\"topic recommendations\"], extract=\"...\", scope: \"reddit\") \u2014 Reddit-only post permalinks via the same backend',\n 'scrape-links(urls=[...], extract=\"...\") \u2014 if you have URLs from prior steps, scrape them now',\n ],\n });\n\n return toolFailure(\n `${errorContent}\\n\\nExecution time: ${formatDuration(executionTime)}\\nQueries: ${params.queries.length}`,\n );\n}\n\n// --- Main handler ---\n\nexport async function handleWebSearch(\n params: WebSearchParams,\n reporter: ToolReporter = NOOP_REPORTER,\n searchExecutor: SearchExecutor = executeSearches,\n): Promise<ToolExecutionResult<WebSearchOutput>> {\n const startTime = Date.now();\n\n try {\n const scopedQueries = buildScopedQueries(params.queries, params.scope);\n const effectiveQueries = scopedQueries.map((entry) => entry.query);\n if (params.scope !== 'web') {\n mcpLog('info', `Searching scope=${params.scope}: ${params.queries.length} input queries \u2192 ${effectiveQueries.length} dispatched`, 'search');\n } else {\n mcpLog('info', `Searching for ${params.queries.length} query/queries`, 'search');\n }\n await reporter.log('info', `Searching for ${effectiveQueries.length} query/queries (scope=${params.scope})`);\n await reporter.progress(15, 100, 'Submitting search queries');\n\n // Phase A \u2014 pre-dispatch normalizer. Rewrites the small fraction of\n // queries Google was statistically going to mis-handle (3+ phrase AND,\n // operator chars in quotes, paths in quotes). 
See src/utils/query-relax.ts.\n const dispatchPlan = effectiveQueries.map((q) => {\n const r = normalizeQueryForDispatch(q);\n return { original: q, dispatched: r.rewritten, rules: [...r.rules], changed: r.changed };\n });\n const dispatchedQueries = dispatchPlan.map((p) => p.dispatched);\n const resultScopes = scopedQueries.map((entry) => entry.resultScope);\n const dropSiteOnRetry = scopedQueries.map((entry) => entry.dropSiteOnRetry);\n const queryRewrites: QueryRewriteRecord[] = dispatchPlan\n .filter((p) => p.changed)\n .map((p) => ({ original: p.original, rewritten: p.dispatched, rules: p.rules }));\n\n if (queryRewrites.length > 0) {\n mcpLog(\n 'info',\n `Pre-dispatch normalized ${queryRewrites.length}/${effectiveQueries.length} queries`,\n 'search',\n );\n await reporter.log(\n 'info',\n `Normalized ${queryRewrites.length} queries pre-dispatch`,\n );\n }\n\n // Phase B \u2014 on-empty retry: any query returning 0 results gets one\n // relaxed retry (drop quotes, drop site:). Recovered hits replace the\n // empty slot transparently.\n const {\n response: rawResponse,\n retried: retriedQueries,\n failurePhase,\n retryError,\n } = await executeWithRelaxRetry(\n dispatchedQueries,\n reporter,\n searchExecutor,\n { dropSiteOnRetry },\n );\n\n if (rawResponse.error) {\n await reporter.log('error', `search_provider_failed: ${rawResponse.error.message}`);\n return buildWebSearchError(rawResponse.error, params, startTime, failurePhase);\n }\n\n const response = filterScopedSearches(rawResponse, params.scope, resultScopes);\n await reporter.progress(50, 100, 'Collected search results');\n\n const { aggregation } = processResults(response);\n await reporter.log(\n 'info',\n `Collected ${aggregation.totalUniqueUrls} unique URLs across ${response.totalQueries} queries`,\n );\n\n // Decide: raw output or LLM classification\n const useRaw = params.raw;\n const llmProcessor = createLLMProcessor();\n\n let markdown: string;\n let llmClassified = false;\n let llmError: string | undefined;\n\n if (useRaw || !llmProcessor) {\n // Raw path: traditional unified ranked list\n if (!useRaw && !llmProcessor) {\n llmError = 'LLM unavailable (LLM_API_KEY / LLM_BASE_URL / LLM_MODEL not set). 
Falling back to raw output.';\n mcpLog('warning', llmError, 'search');\n // mcp-revisions/llm-degradation/01: surface degraded mode to the client.\n await reporter.log('warning', 'llm_classifier_unreachable: planner not configured; raw ranked list returned');\n }\n let rawRefineQueries: RefineQuerySuggestion[] | undefined;\n if (useRaw && llmProcessor) {\n const refineResult = await suggestRefineQueriesForRawMode(\n aggregation.rankedUrls,\n params.extract,\n params.queries,\n llmProcessor,\n );\n rawRefineQueries = refineResult.result;\n }\n markdown = appendSignalsAndFollowUps(\n buildRawOutput(params.queries, aggregation, response.searches, params.verbose),\n buildSignalsSection(aggregation, response.searches, response.totalQueries),\n rawRefineQueries,\n { includeSignals: params.verbose },\n );\n await reporter.progress(80, 100, 'Ranking search results');\n } else {\n // LLM classification path\n await reporter.progress(65, 100, 'Classifying results by relevance');\n const classification = await classifySearchResults(\n aggregation.rankedUrls,\n params.extract,\n response.totalQueries,\n llmProcessor,\n params.queries,\n );\n\n if (classification.result) {\n markdown = buildClassifiedOutput(\n classification.result, aggregation, params.extract, response.searches, response.totalQueries, params.verbose,\n );\n llmClassified = true;\n await reporter.progress(85, 100, 'Formatted classified results');\n } else {\n // Classification failed \u2014 fall back to raw\n llmError = classification.error ?? 'Unknown classification error';\n mcpLog('warning', `Classification failed, falling back to raw: ${llmError}`, 'search');\n // mcp-revisions/llm-degradation/01: surface degraded mode to the client.\n await reporter.log('warning', `llm_classifier_unreachable: ${llmError}`);\n markdown = appendSignalsAndFollowUps(\n buildRawOutput(params.queries, aggregation, response.searches, params.verbose),\n buildSignalsSection(aggregation, response.searches, response.totalQueries),\n undefined,\n { includeSignals: params.verbose },\n );\n await reporter.progress(85, 100, 'Classification failed, using raw output');\n }\n }\n\n const executionTime = Date.now() - startTime;\n const metadata = buildMetadata(\n aggregation, executionTime, response.totalQueries, response.searches, llmClassified, params.scope, llmError,\n queryRewrites, retriedQueries, retryError,\n );\n\n // Build per-row structured results so capability-aware clients can\n // index into `structuredContent.results` rather than regex-scrape the\n // markdown table. The LLM tag wins when present; heuristic is the\n // always-on fallback.\n const llmTagsByRank = new Map<number, string>();\n // (When classification succeeds the source_type per-row is populated\n // inside buildClassifiedOutput via the entry.source_type field \u2014 but\n // we don't have a direct handle on it here without a refactor. The\n // heuristic alone covers the structuredContent shape correctly; the\n // LLM-tagged variant remains in the markdown body.)\n const results = buildStructuredResults(aggregation, llmTagsByRank);\n\n mcpLog('info', `Search completed: ${aggregation.rankedUrls.length} URLs, classified=${llmClassified}`, 'search');\n await reporter.log('info', `Search completed with ${aggregation.rankedUrls.length} URLs (classified: ${llmClassified})`);\n\n const footer = `\\n---\\n*${formatDuration(executionTime)} | ${aggregation.totalUniqueUrls} unique URLs${llmClassified ? 
' | LLM classified' : ''}*`;\n const fullMarkdown = markdown + footer;\n\n return toolSuccess(fullMarkdown, { content: fullMarkdown, results, metadata });\n } catch (error) {\n return buildWebSearchError(error, params, startTime);\n }\n}\n\nexport function registerWebSearchTool(server: MCPServer): void {\n server.tool(\n {\n name: 'web-search',\n title: 'Web Search',\n description:\n 'Fan out Google queries in parallel. One call carries up to 50 queries in a flat `queries` array \u2014 pack diverse facets (not paraphrases) into a single call. Call me AGGRESSIVELY across a session: 2\u20134 rounds is normal, 1 is underuse. After each pass, read `gaps[]` + `refine_queries[]` and fire another round with the new terms. Safe to call multiple times in parallel in the same turn for orthogonal subtopics. `scope`: `\"reddit\"` (server appends `site:reddit.com` + filters to post permalinks \u2014 use for sentiment / migration / lived experience), `\"web\"` default (spec / bug / pricing / CVE / API), `\"both\"` (fan each query across both \u2014 use when opinion-heavy AND needs official sources). Returns a tiered Markdown report (HIGHLY_RELEVANT / MAYBE_RELEVANT / OTHER) + grounded synthesis with `[rank]` citations + `## Gaps` + `## Suggested follow-up searches` tied to gap ids. Set `raw=true` to skip classification.',\n schema: webSearchParamsSchema,\n outputSchema: webSearchOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: true,\n },\n },\n async (args, ctx) => {\n if (!getCapabilities().search) {\n return toToolResponse(toolFailure(getMissingEnvMessage('search')));\n }\n\n const reporter = createToolReporter(ctx, 'web-search');\n const result = await handleWebSearch(args, reporter);\n\n await reporter.progress(100, 100, result.isError ? 'Search failed' : 'Search complete');\n return toToolResponse(result);\n },\n );\n}\n"],
5
+ "mappings": "AAOA,SAAS,iBAAiB,4BAA4B;AACtD;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP,SAAS,oBAAiD;AAC1D;AAAA,EACE;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OAIK;AACP,SAAS,qBAA2C;AACpD,SAAS,2BAA2B;AACpC;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAGK;AACP,SAAS,0BAA0B;AACnC;AAAA,EACE;AAAA,EACA;AAAA,OACK;AAsBP,MAAM,wBAAwB;AAC9B,MAAM,cAAc;AAQpB,SAAS,kBAAkB,OAAuB;AAChD,SAAO,wBAAwB,KAAK,KAAK,IAAI,QAAQ,GAAG,KAAK;AAC/D;AAEA,SAAS,mBAAmB,SAAmB,OAAiD;AAC9F,MAAI,UAAU,OAAO;AACnB,WAAO,QAAQ,IAAI,CAAC,WAAW,EAAE,OAAO,aAAa,OAAO,iBAAiB,KAAK,EAAE;AAAA,EACtF;AAEA,QAAM,WAAW,QAAQ;AAAA,IAAI,CAAC,OAC3B,EAAE,OAAO,kBAAkB,CAAC,GAAG,aAAa,UAAmB,iBAAiB,MAAM;AAAA,EACzF;AAEA,MAAI,UAAU,SAAU,QAAO;AAE/B,SAAO;AAAA,IACL,GAAG,QAAQ,IAAI,CAAC,WAAW,EAAE,OAAO,aAAa,OAAgB,iBAAiB,KAAK,EAAE;AAAA,IACzF,GAAG;AAAA,EACL;AACF;AAEA,eAAe,gBAAgB,SAA4C;AACzE,QAAM,SAAS,IAAI,aAAa;AAChC,SAAO,OAAO,eAAe,OAAO;AACtC;AAoBA,eAAe,sBACb,YACA,UACA,iBAAiC,iBACjC,eAAkE,CAAC,GAMlE;AACD,QAAM,UAAU,MAAM,eAAe,UAAU;AAE/C,MAAI,QAAQ,OAAO;AACjB,WAAO,EAAE,UAAU,SAAS,SAAS,CAAC,GAAG,cAAc,UAAU;AAAA,EACnE;AAEA,QAAM,eAAe,QAAQ,SAC1B,IAAI,CAAC,GAAG,MAAO,EAAE,QAAQ,WAAW,IAAI,IAAI,EAAG,EAC/C,OAAO,CAAC,MAAM,MAAM,EAAE;AAEzB,MAAI,aAAa,WAAW,GAAG;AAC7B,WAAO,EAAE,UAAU,SAAS,SAAS,CAAC,EAAE;AAAA,EAC1C;AAGA,QAAM,QAAgB,CAAC;AACvB,aAAW,OAAO,cAAc;AAC9B,UAAM,KAAK,WAAW,GAAG;AACzB,QAAI,OAAO,OAAO,SAAU;AAC5B,UAAM,IAAI,mBAAmB,IAAI,EAAE,UAAU,aAAa,kBAAkB,GAAG,KAAK,KAAK,CAAC;AAC1F,QAAI,EAAE,WAAW,EAAE,cAAc,IAAI;AACnC,YAAM,KAAK,EAAE,OAAO,KAAK,UAAU,IAAI,SAAS,EAAE,WAAW,OAAO,CAAC,GAAG,EAAE,KAAK,EAAE,CAAC;AAAA,IACpF;AAAA,EACF;AAEA,MAAI,MAAM,WAAW,GAAG;AACtB,WAAO,EAAE,UAAU,SAAS,SAAS,CAAC,EAAE;AAAA,EAC1C;AAEA;AAAA,IACE;AAAA,IACA,GAAG,MAAM,MAAM,IAAI,aAAa,MAAM;AAAA,IACtC;AAAA,EACF;AACA,QAAM,SAAS;AAAA,IACb;AAAA,IACA,GAAG,MAAM,MAAM;AAAA,EACjB;AAEA,QAAM,YAAY,MAAM,eAAe,MAAM,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC;AAClE,QAAM,UAAgC,CAAC;AACvC,QAAM,eAAe,oBAAI,IAAgD;AAEzE,QAAM,QAAQ,CAAC,MAAM,MAAM;AACzB,UAAM,IAAI,UAAU,SAAS,CAAC;AAC9B,QAAI,EAAG,cAAa,IAAI,KAAK,OAAO,CAAC;AACrC,YAAQ,KAAK;AAAA,MACX,UAAU,KAAK;AAAA,MACf,cAAc,KAAK;AAAA,MACnB,OAAO,KAAK;AAAA,MACZ,mBAAmB,GAAG,QAAQ,UAAU;AAAA,IAC1C,CAAC;AAAA,EACH,CAAC;AAED,MAAI,UAAU,OAAO;AACnB;AAAA,MACE;AAAA,MACA,kEAAkE,UAAU,MAAM,OAAO;AAAA,MACzF;AAAA,IACF;AACA,UAAM,SAAS;AAAA,MACb;AAAA,MACA,8BAA8B,UAAU,MAAM,OAAO;AAAA,IACvD;AACA,WAAO;AAAA,MACL,UAAU;AAAA,MACV;AAAA,MACA,YAAY,UAAU;AAAA,IACxB;AAAA,EACF;AAEA,QAAM,iBAAiB,QAAQ,SAAS,IAAI,CAAC,GAAG,QAAQ;AACtD,UAAM,IAAI,aAAa,IAAI,GAAG;AAC9B,QAAI,KAAK,EAAE,QAAQ,SAAS,GAAG;AAC7B,aAAO,EAAE,GAAG,GAAG,OAAO,EAAE,MAAM;AAAA,IAChC;AACA,WAAO;AAAA,EACT,CAAC;AAED,SAAO;AAAA,IACL,UAAU,EAAE,GAAG,SAAS,UAAU,eAAe;AAAA,IACjD;AAAA,EACF;AACF;AAEA,SAAS,qBACP,UACA,OACA,eAA6C,CAAC,GAC9B;AAChB,MAAI,UAAU,MAAO,QAAO;AAC5B,QAAM,WAAW,SAAS,SAAS,IAAI,CAAC,QAAQ,UAAU;AACxD,UAAM,cAAc,aAAa,KAAK,MAAM,UAAU,WAAW,WAAW;AAC5E,WAAO;AAAA,MACL,GAAG;AAAA,MACH,SAAS,OAAO,QAAQ,OAAO,CAAC,MAAM;AACpC,YAAI;AACJ,YAAI;AAAE,iBAAO,IAAI,IAAI,EAAE,IAAI,EAAE;AAAA,QAAU,QAAQ;AAAE,iBAAO;AAAA,QAAM;AAC9D,YAAI,gBAAgB,UAAU;AAC5B,iBAAO,YAAY,KAAK,IAAI,KAAK,sBAAsB,KAAK,EAAE,IAAI;AAAA,QACpE;AAEA,YAAI,CAAC,YAAY,KAAK,IAAI,EAAG,QAAO;AACpC,eAAO,sBAAsB,KAAK,EAAE,IAAI;AAAA,MAC1C,CAAC;AAAA,IACH;AAAA,EACF,CAAC;AACD,SAAO,EAAE,GAAG,UAAU,UAAU,SAAS;AAC3C;AAEA,SAAS,eAAe,UAEtB;AACA,QAAM,cAAc,iBAAiB,SAAS,UAAU,CAAC;AACzD,SAAO,EAAE,YAAY;AACvB;AAIA,SAAS,eACP,SACA,aACA,UACA,UAAmB,OACX;AACR,SAAO;AAAA,IACL,YAAY;AAAA,IAAY;AAAA,IAAS;AAAA,IACjC,YAAY;AAAA,IACZ,YAAY;AAAA,IAAoB,YAAY;AAAA,IAC5C;AAAA,EACF;AACF;AAEA,SAAS,oBACP,aACA,UACA,cACQ;AACR,QAAM,gBAAgB,SAAS,OAAO,CAAC,WAAW,OAAO,QAAQ
,UAAU,CAAC,EAAE;AAC9E,QAAM,WAAW,SACd,OAAO,CAAC,WAAW,OAAO,QAAQ,UAAU,CAAC,EAC7C,IAAI,CAAC,WAAW,IAAI,OAAO,KAAK,GAAG;AACtC,QAAM,iBAAiB,YAAY,WAAW,OAAO,CAAC,QAAQ,IAAI,WAAW,EAAE;AAE/E,QAAM,QAAQ;AAAA,IACZ;AAAA,IACA,eAAe,aAAa,IAAI,YAAY;AAAA,IAC5C,qBAAqB,cAAc;AAAA,EACrC;AAEA,MAAI,SAAS,SAAS,GAAG;AACvB,UAAM,KAAK,gBAAgB,SAAS,KAAK,IAAI,CAAC,EAAE;AAAA,EAClD;AAEA,SAAO,MAAM,KAAK,IAAI;AACxB;AAEO,SAAS,+BACd,eACQ;AACR,MAAI,CAAC,iBAAiB,cAAc,WAAW,GAAG;AAChD,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,CAAC,mCAAmC,EAAE;AAEpD,aAAW,QAAQ,eAAe;AAChC,UAAM,QAAQ,mBAAmB,KAAK,SAAS,EAAE;AACjD,QAAI,CAAC,MAAO;AACZ,UAAM,YAAY,mBAAmB,KAAK,aAAa,EAAE;AACzD,UAAM,SAAS,OAAO,KAAK,WAAW,WAClC,kBAAkB,KAAK,MAAM,QAC7B,KAAK,kBACH,MAAM,mBAAmB,KAAK,eAAe,CAAC,OAC9C;AACN,UAAM;AAAA,MAAK,YACP,KAAK,KAAK,WAAM,SAAS,GAAG,MAAM,KAClC,KAAK,KAAK,GAAG,MAAM;AAAA,IACvB;AAAA,EACF;AAEA,SAAO,MAAM,WAAW,IAAI,KAAK,MAAM,KAAK,IAAI;AAClD;AAEO,SAAS,0BACd,UACA,gBACA,eACA,UAAwC,CAAC,GACjC;AACR,QAAM,iBAAiB,QAAQ,kBAAkB;AACjD,QAAM,WAAW,CAAC,QAAQ;AAC1B,MAAI,kBAAkB,gBAAgB;AACpC,aAAS,KAAK,IAAI,OAAO,cAAc;AAAA,EACzC;AACA,QAAM,YAAY,+BAA+B,aAAa;AAC9D,MAAI,WAAW;AACb,aAAS,KAAK,IAAI,SAAS;AAAA,EAC7B;AACA,SAAO,SAAS,KAAK,IAAI;AAC3B;AAYA,MAAM,iBAAiB;AACvB,MAAM,iBAAiB;AAchB,SAAS,sBACd,OACA,aACA,OAAuC,CAAC,GAChC;AACR,QAAM,MAAM,KAAK,OAAO;AACxB,QAAM,MAAM,KAAK,OAAO;AAExB,QAAM,QAA8F,CAAC;AAErG,aAAW,aAAa,MAAM,MAAM;AAClC,QAAI,MAAM,UAAU,IAAK;AACzB,UAAM,KAAK,EAAE,WAAW,MAAM,kBAAkB,CAAC;AAAA,EACnD;AAEA,MAAI,MAAM,SAAS,KAAK;AACtB,UAAM,SAAS,KAAK,IAAI,KAAK,GAAG;AAChC,eAAW,aAAa,MAAM,OAAO;AACnC,UAAI,MAAM,UAAU,OAAQ;AAC5B,YAAM,KAAK,EAAE,WAAW,MAAM,iBAAiB,CAAC;AAAA,IAClD;AAAA,EACF;AAEA,MAAI,MAAM,WAAW,EAAG,QAAO;AAE/B,QAAM,QAAkB,CAAC;AACzB,QAAM,KAAK,uDAAkD;AAC7D,QAAM,QAAQ,CAAC,MAAM,MAAM;AACzB,UAAM,QAAQ,YAAY,IAAI,KAAK,UAAU,IAAI;AACjD,UAAM,SAAS,OAAO,UAAU,MAAM,OAAO,KAAK,EAAE,SAAS,IAAI,MAAM,SAAS;AAChF,QAAI;AACJ,QAAI;AACF,eAAS,IAAI,IAAI,KAAK,UAAU,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,IACpE,QAAQ;AACN,eAAS,KAAK,UAAU;AAAA,IAC1B;AACA,UAAM;AAAA,MACJ,GAAG,IAAI,CAAC,QAAQ,KAAK,UAAU,KAAK,KAAK,KAAK,UAAU,GAAG,cAAS,MAAM,WAAM,MAAM,MAAM,KAAK,IAAI,UAAU,KAAK,UAAU,IAAI;AAAA,IACpI;AAAA,EACF,CAAC;AACD,SAAO,MAAM,KAAK,IAAI;AACxB;AAIA,SAAS,sBACP,gBACA,aACA,SACA,UACA,cACA,UAAmB,OACX;AACR,QAAM,aAAa,YAAY;AAG/B,QAAM,cAAc,IAAI,IAAI,eAAe,QAAQ,IAAI,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AAE1E,QAAM,QAAQ;AAAA,IACZ,MAAM,CAAC;AAAA,IACP,OAAO,CAAC;AAAA,IACR,OAAO,CAAC;AAAA,EACV;AAEA,aAAW,OAAO,YAAY;AAC5B,UAAM,QAAQ,YAAY,IAAI,IAAI,IAAI;AACtC,UAAM,OAAO,OAAO;AACpB,QAAI,SAAS,mBAAmB;AAC9B,YAAM,KAAK,KAAK,GAAG;AAAA,IACrB,WAAW,SAAS,kBAAkB;AACpC,YAAM,MAAM,KAAK,GAAG;AAAA,IACtB,OAAO;AACL,YAAM,MAAM,KAAK,GAAG;AAAA,IACtB;AAAA,EACF;AAEA,QAAM,QAAkB,CAAC;AAGzB,QAAM,KAAK,MAAM,eAAe,KAAK,EAAE;AACvC,QAAM,KAAK,kBAAkB,OAAO,EAAE;AACtC,QAAM,KAAK,KAAK,YAAY,mBAAc,WAAW,MAAM,gBAAW,MAAM,KAAK,MAAM,qBAAqB,MAAM,MAAM,MAAM,oBAAoB;AAClJ,MAAI,eAAe,YAAY;AAC7B,UAAM,aAAa,eAAe,oBAAoB,WAAM,eAAe,iBAAiB,KAAK;AACjG,UAAM,KAAK,mBAAmB,eAAe,UAAU,KAAK,UAAU,EAAE;AAAA,EAC1E;AACA,QAAM,KAAK,EAAE;AAIb,QAAM,YAAY;AAAA,IAChB,EAAE,MAAM,MAAM,MAAM,OAAO,MAAM,MAAM;AAAA,IACvC;AAAA,EACF;AACA,MAAI,WAAW;AACb,UAAM,KAAK,SAAS;AACpB,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,QAAM,KAAK,gBAAgB,eAAe,SAAS,EAAE;AACrD,QAAM,KAAK,EAAE;AAGb,QAAM,gBAAgB,CAAC,QAA2C;AAChE,UAAM,QAAQ,YAAY,IAAI,IAAI,IAAI;AACtC,UAAM,cAAc,KAAK,MAAM,IAAI,gBAAgB,GAAG;AACtD,UAAM,SAAS,GAAG,IAAI,SAAS,IAAI,YAAY,KAAK,WAAW;AAC/D,UAAM,aAAa,OAAO,cAAc,KAAK,MAAM,WAAW,OAAO;AACrE,UAAM,SAAS,OAAO,SAAS,MAAM,OAAO,QAAQ,OAAO,KAAK,IAAI;AACpE,WAAO,KAAK,IAAI,IAAI,OAAO,IAAI,KAAK,KAAK,IAAI,GAAG,OAAO,UAAU,MAAM,MAAM,MAAM,MAAM;AAAA,EAC3F;AAGA,MAAI,MAAM,KAAK,SAAS,GAAG;AACzB,UAAM,KAAK,wBAAwB,MAAM,KAAK,MAAM,GAAG;
AACvD,UAAM,KAAK,sCAAsC;AACjD,UAAM,KAAK,sCAAsC;AACjD,eAAW,OAAO,MAAM,KAAM,OAAM,KAAK,cAAc,GAAG,CAAC;AAC3D,UAAM,KAAK,EAAE;AAAA,EACf;AAGA,MAAI,MAAM,MAAM,SAAS,GAAG;AAC1B,UAAM,KAAK,uBAAuB,MAAM,MAAM,MAAM,GAAG;AACvD,UAAM,KAAK,sCAAsC;AACjD,UAAM,KAAK,sCAAsC;AACjD,eAAW,OAAO,MAAM,MAAO,OAAM,KAAK,cAAc,GAAG,CAAC;AAC5D,UAAM,KAAK,EAAE;AAAA,EACf;AAGA,MAAI,MAAM,MAAM,SAAS,GAAG;AAC1B,UAAM,KAAK,sBAAsB,MAAM,MAAM,MAAM,GAAG;AACtD,UAAM,KAAK,wCAAwC;AACnD,UAAM,KAAK,wCAAwC;AACnD,eAAW,OAAO,MAAM,OAAO;AAC7B,YAAM,QAAQ,YAAY,IAAI,IAAI,IAAI;AACtC,YAAM,YAAY,IAAI,QAAQ,IAAI,CAAC,MAAM,IAAI,CAAC,GAAG,EAAE,KAAK,IAAI;AAC5D,YAAM,aAAa,OAAO,cAAc,KAAK,MAAM,WAAW,OAAO;AACrE,UAAI;AACJ,UAAI;AACF,iBAAS,IAAI,IAAI,IAAI,GAAG,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,MACzD,QAAQ;AACN,iBAAS,IAAI;AAAA,MACf;AACA,YAAM,KAAK,KAAK,IAAI,IAAI,MAAM,MAAM,MAAM,UAAU,MAAM,IAAI,MAAM,QAAQ,CAAC,CAAC,MAAM,SAAS,IAAI;AAAA,IACnG;AACA,UAAM,KAAK,EAAE;AAAA,EACf;AAKA,MAAI,SAAS;AACX,UAAM,KAAK,oBAAoB,aAAa,UAAU,YAAY,CAAC;AAAA,EACrE;AAGA,MAAI,eAAe,QAAQ,eAAe,KAAK,SAAS,GAAG;AACzD,UAAM,KAAK,EAAE;AACb,UAAM,KAAK,SAAS;AACpB,eAAW,OAAO,eAAe,MAAM;AACrC,YAAM,KAAK,QAAQ,IAAI,EAAE,OAAO,IAAI,WAAW,EAAE;AAAA,IACnD;AAAA,EACF;AAEA,QAAM,YAAY,+BAA+B,eAAe,cAAc;AAC9E,MAAI,WAAW;AACb,UAAM,KAAK,EAAE;AACb,UAAM,KAAK,SAAS;AAAA,EACtB;AAEA,SAAO,MAAM,KAAK,IAAI;AACxB;AAIA,SAAS,cACP,aACA,eACA,cACA,UACA,eACA,OACA,UACA,eACA,gBACA,YACA;AACA,QAAM,kBAAkB,SAAS,IAAI,OAAK;AACxC,QAAI;AACJ,UAAM,YAAY,EAAE,QAAQ,CAAC;AAC7B,QAAI,WAAW;AACb,UAAI;AAAE,oBAAY,IAAI,IAAI,UAAU,IAAI,EAAE,SAAS,QAAQ,UAAU,EAAE;AAAA,MAAG,QAAQ;AAAA,MAAe;AAAA,IACnG;AACA,WAAO,EAAE,OAAO,EAAE,OAAO,cAAc,EAAE,QAAQ,QAAQ,SAAS,UAAU;AAAA,EAC9E,CAAC;AACD,QAAM,kBAAkB,SACrB,OAAO,OAAK,EAAE,QAAQ,UAAU,CAAC,EACjC,IAAI,OAAK,EAAE,KAAK;AACnB,QAAM,oBAAoB,SAAS,OAAO,OAAK,EAAE,QAAQ,SAAS,CAAC,EAAE;AAErE,SAAO;AAAA,IACL,aAAa;AAAA,IACb,YAAY;AAAA,IACZ,QAAQ,KAAK,IAAI,eAAe,mBAAmB,CAAC;AAAA,IACpD,mBAAmB;AAAA,IACnB,gBAAgB;AAAA,IAChB;AAAA,IACA,GAAI,WAAW,EAAE,WAAW,SAAS,IAAI,CAAC;AAAA,IAC1C,kBAAkB;AAAA,IAClB,GAAI,gBAAgB,SAAS,IAAI,EAAE,mBAAmB,gBAAgB,IAAI,CAAC;AAAA,IAC3E,GAAI,iBAAiB,cAAc,SAAS,IAAI,EAAE,gBAAgB,cAAc,IAAI,CAAC;AAAA,IACrF,GAAI,kBAAkB,eAAe,SAAS,IAAI,EAAE,iBAAiB,eAAe,IAAI,CAAC;AAAA,IACzF,GAAI,aACA;AAAA,MACE,aAAa;AAAA,QACX,OAAO;AAAA,QACP,MAAM,WAAW;AAAA,QACjB,SAAS,WAAW;AAAA,QACpB,WAAW,WAAW;AAAA,QACtB,GAAI,OAAO,WAAW,eAAe,WAAW,EAAE,YAAY,WAAW,WAAW,IAAI,CAAC;AAAA,MAC3F;AAAA,IACF,IACA,CAAC;AAAA,EACP;AACF;AAEA,SAAS,uBACP,aACA,eAUC;AACD,SAAO,YAAY,WAAW,IAAI,CAAC,QAAQ;AAGzC,UAAM,SAAS,eAAe,IAAI,IAAI,IAAI;AAC1C,UAAM,YAAY,oBAAoB,IAAI,GAAG;AAC7C,WAAO;AAAA,MACL,MAAM,IAAI;AAAA,MACV,KAAK,IAAI;AAAA,MACT,OAAO,IAAI;AAAA,MACX,SAAS,IAAI;AAAA,MACb,aAAe,UAA+B;AAAA,MAC9C,OAAO,OAAO,IAAI,MAAM,QAAQ,CAAC,CAAC;AAAA,MAClC,SAAS,IAAI;AAAA,MACb,eAAe,IAAI;AAAA,IACrB;AAAA,EACF,CAAC;AACH;AAIA,SAAS,kBAAkB,OAA0C;AACnE,MAAI,OAAO,UAAU,YAAY,UAAU,KAAM,QAAO;AACxD,QAAM,SAAS;AACf,SAAO,OAAO,OAAO,SAAS,YACzB,OAAO,OAAO,YAAY,YAC1B,OAAO,OAAO,cAAc;AACnC;AAEA,SAAS,yBAAyB,OAAiC;AACjE,SAAO,kBAAkB,KAAK,IAAI,QAAQ,cAAc,KAAK;AAC/D;AAEA,SAAS,2BACP,OACA,OACQ;AACR,MAAI,UAAU,WAAW;AACvB,WAAO,gDAAgD,MAAM,OAAO;AAAA,EACtE;AAEA,MAAI,UAAU,eAAe;AAC3B,WAAO,sDAAsD,MAAM,OAAO;AAAA,EAC5E;AAEA,SAAO,MAAM;AACf;AAEA,SAAS,oBACP,OACA,QACA,WACA,OACsC;AACtC,QAAM,kBAAkB,yBAAyB,KAAK;AACtD,QAAM,UAAU,2BAA2B,iBAAiB,KAAK;AACjE,QAAM,gBAAgB,KAAK,IAAI,IAAI;AAEnC,SAAO,SAAS,eAAe,OAAO,IAAI,QAAQ;AAElD,QAAM,eAAe,YAAY;AAAA,IAC/B,MAAM,gBAAgB;AAAA,IACtB;AAAA,IACA,WAAW,gBAAgB;AAAA,IAC3B,UAAU;AAAA,IACV,UAAU,CAAC,wCAAwC;AAAA,IACnD,cAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,EACF,CAAC;AAED,SAAO;AAAA,IACL,GAAG,YAAY;AAAA;AAAA,kBAAuB,eAAe,aAAa,CAAC;AAAA,WAAc,OAAO,QAAQ,MAAM;AAAA,EACxG;AACF;AAIA,eAAsB,g
BACpB,QACA,WAAyB,eACzB,iBAAiC,iBACc;AAC/C,QAAM,YAAY,KAAK,IAAI;AAE3B,MAAI;AACF,UAAM,gBAAgB,mBAAmB,OAAO,SAAS,OAAO,KAAK;AACrE,UAAM,mBAAmB,cAAc,IAAI,CAAC,UAAU,MAAM,KAAK;AACjE,QAAI,OAAO,UAAU,OAAO;AAC1B,aAAO,QAAQ,mBAAmB,OAAO,KAAK,KAAK,OAAO,QAAQ,MAAM,yBAAoB,iBAAiB,MAAM,eAAe,QAAQ;AAAA,IAC5I,OAAO;AACL,aAAO,QAAQ,iBAAiB,OAAO,QAAQ,MAAM,kBAAkB,QAAQ;AAAA,IACjF;AACA,UAAM,SAAS,IAAI,QAAQ,iBAAiB,iBAAiB,MAAM,yBAAyB,OAAO,KAAK,GAAG;AAC3G,UAAM,SAAS,SAAS,IAAI,KAAK,2BAA2B;AAK5D,UAAM,eAAe,iBAAiB,IAAI,CAAC,MAAM;AAC/C,YAAM,IAAI,0BAA0B,CAAC;AACrC,aAAO,EAAE,UAAU,GAAG,YAAY,EAAE,WAAW,OAAO,CAAC,GAAG,EAAE,KAAK,GAAG,SAAS,EAAE,QAAQ;AAAA,IACzF,CAAC;AACD,UAAM,oBAAoB,aAAa,IAAI,CAAC,MAAM,EAAE,UAAU;AAC9D,UAAM,eAAe,cAAc,IAAI,CAAC,UAAU,MAAM,WAAW;AACnE,UAAM,kBAAkB,cAAc,IAAI,CAAC,UAAU,MAAM,eAAe;AAC1E,UAAM,gBAAsC,aACzC,OAAO,CAAC,MAAM,EAAE,OAAO,EACvB,IAAI,CAAC,OAAO,EAAE,UAAU,EAAE,UAAU,WAAW,EAAE,YAAY,OAAO,EAAE,MAAM,EAAE;AAEjF,QAAI,cAAc,SAAS,GAAG;AAC5B;AAAA,QACE;AAAA,QACA,2BAA2B,cAAc,MAAM,IAAI,iBAAiB,MAAM;AAAA,QAC1E;AAAA,MACF;AACA,YAAM,SAAS;AAAA,QACb;AAAA,QACA,cAAc,cAAc,MAAM;AAAA,MACpC;AAAA,IACF;AAKA,UAAM;AAAA,MACJ,UAAU;AAAA,MACV,SAAS;AAAA,MACT;AAAA,MACA;AAAA,IACF,IAAI,MAAM;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA,EAAE,gBAAgB;AAAA,IACpB;AAEA,QAAI,YAAY,OAAO;AACrB,YAAM,SAAS,IAAI,SAAS,2BAA2B,YAAY,MAAM,OAAO,EAAE;AAClF,aAAO,oBAAoB,YAAY,OAAO,QAAQ,WAAW,YAAY;AAAA,IAC/E;AAEA,UAAM,WAAW,qBAAqB,aAAa,OAAO,OAAO,YAAY;AAC7E,UAAM,SAAS,SAAS,IAAI,KAAK,0BAA0B;AAE3D,UAAM,EAAE,YAAY,IAAI,eAAe,QAAQ;AAC/C,UAAM,SAAS;AAAA,MACb;AAAA,MACA,aAAa,YAAY,eAAe,uBAAuB,SAAS,YAAY;AAAA,IACtF;AAGA,UAAM,SAAS,OAAO;AACtB,UAAM,eAAe,mBAAmB;AAExC,QAAI;AACJ,QAAI,gBAAgB;AACpB,QAAI;AAEJ,QAAI,UAAU,CAAC,cAAc;AAE3B,UAAI,CAAC,UAAU,CAAC,cAAc;AAC5B,mBAAW;AACX,eAAO,WAAW,UAAU,QAAQ;AAEpC,cAAM,SAAS,IAAI,WAAW,8EAA8E;AAAA,MAC9G;AACA,UAAI;AACJ,UAAI,UAAU,cAAc;AAC1B,cAAM,eAAe,MAAM;AAAA,UACzB,YAAY;AAAA,UACZ,OAAO;AAAA,UACP,OAAO;AAAA,UACP;AAAA,QACF;AACA,2BAAmB,aAAa;AAAA,MAClC;AACA,iBAAW;AAAA,QACT,eAAe,OAAO,SAAS,aAAa,SAAS,UAAU,OAAO,OAAO;AAAA,QAC7E,oBAAoB,aAAa,SAAS,UAAU,SAAS,YAAY;AAAA,QACzE;AAAA,QACA,EAAE,gBAAgB,OAAO,QAAQ;AAAA,MACnC;AACA,YAAM,SAAS,SAAS,IAAI,KAAK,wBAAwB;AAAA,IAC3D,OAAO;AAEL,YAAM,SAAS,SAAS,IAAI,KAAK,kCAAkC;AACnE,YAAM,iBAAiB,MAAM;AAAA,QAC3B,YAAY;AAAA,QACZ,OAAO;AAAA,QACP,SAAS;AAAA,QACT;AAAA,QACA,OAAO;AAAA,MACT;AAEA,UAAI,eAAe,QAAQ;AACzB,mBAAW;AAAA,UACT,eAAe;AAAA,UAAQ;AAAA,UAAa,OAAO;AAAA,UAAS,SAAS;AAAA,UAAU,SAAS;AAAA,UAAc,OAAO;AAAA,QACvG;AACA,wBAAgB;AAChB,cAAM,SAAS,SAAS,IAAI,KAAK,8BAA8B;AAAA,MACjE,OAAO;AAEL,mBAAW,eAAe,SAAS;AACnC,eAAO,WAAW,+CAA+C,QAAQ,IAAI,QAAQ;AAErF,cAAM,SAAS,IAAI,WAAW,+BAA+B,QAAQ,EAAE;AACvE,mBAAW;AAAA,UACT,eAAe,OAAO,SAAS,aAAa,SAAS,UAAU,OAAO,OAAO;AAAA,UAC7E,oBAAoB,aAAa,SAAS,UAAU,SAAS,YAAY;AAAA,UACzE;AAAA,UACA,EAAE,gBAAgB,OAAO,QAAQ;AAAA,QACnC;AACA,cAAM,SAAS,SAAS,IAAI,KAAK,yCAAyC;AAAA,MAC5E;AAAA,IACF;AAEA,UAAM,gBAAgB,KAAK,IAAI,IAAI;AACnC,UAAM,WAAW;AAAA,MACf;AAAA,MAAa;AAAA,MAAe,SAAS;AAAA,MAAc,SAAS;AAAA,MAAU;AAAA,MAAe,OAAO;AAAA,MAAO;AAAA,MACnG;AAAA,MAAe;AAAA,MAAgB;AAAA,IACjC;AAMA,UAAM,gBAAgB,oBAAI,IAAoB;AAM9C,UAAM,UAAU,uBAAuB,aAAa,aAAa;AAEjE,WAAO,QAAQ,qBAAqB,YAAY,WAAW,MAAM,qBAAqB,aAAa,IAAI,QAAQ;AAC/G,UAAM,SAAS,IAAI,QAAQ,yBAAyB,YAAY,WAAW,MAAM,sBAAsB,aAAa,GAAG;AAEvH,UAAM,SAAS;AAAA;AAAA,GAAW,eAAe,aAAa,CAAC,MAAM,YAAY,eAAe,eAAe,gBAAgB,sBAAsB,EAAE;AAC/I,UAAM,eAAe,WAAW;AAEhC,WAAO,YAAY,cAAc,EAAE,SAAS,cAAc,SAAS,SAAS,CAAC;AAAA,EAC/E,SAAS,OAAO;AACd,WAAO,oBAAoB,OAAO,QAAQ,SAAS;AAAA,EACrD;AACF;AAEO,SAAS,sBAAsB,QAAyB;AAC7D,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MA
CjB;AAAA,IACF;AAAA,IACA,OAAO,MAAM,QAAQ;AACnB,UAAI,CAAC,gBAAgB,EAAE,QAAQ;AAC7B,eAAO,eAAe,YAAY,qBAAqB,QAAQ,CAAC,CAAC;AAAA,MACnE;AAEA,YAAM,WAAW,mBAAmB,KAAK,YAAY;AACrD,YAAM,SAAS,MAAM,gBAAgB,MAAM,QAAQ;AAEnD,YAAM,SAAS,SAAS,KAAK,KAAK,OAAO,UAAU,kBAAkB,iBAAiB;AACtF,aAAO,eAAe,MAAM;AAAA,IAC9B;AAAA,EACF;AACF;",
  "names": []
  }
package/dist/tools/start-research.js CHANGED
@@ -1,5 +1,4 @@
  import {
- startResearchOutputSchema,
  startResearchParamsSchema
  } from "../schemas/start-research.js";
  import {
@@ -167,7 +166,6 @@ function registerStartResearchTool(server) {
  title: "Start Research Session",
  description: "Call this FIRST every research session. Provide a `goal`; I return a goal-tailored brief naming (a) `primary_branch` (reddit for sentiment/migration, web for spec/bug/pricing, both when opinion-heavy AND needs official sources), (b) the exact `first_call_sequence` of web-search + scrape-links calls to fire, (c) 25\u201350 keyword seeds for your first `web-search` call, (d) iteration hints, (e) gaps to watch, (f) stop criteria. No goal? You still get the generic 3-tool playbook. Other tools work without calling this, but you will use them worse.",
  schema: startResearchParamsSchema,
- outputSchema: startResearchOutputSchema,
  annotations: {
  readOnlyHint: true,
  idempotentHint: true,
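
The hunk above is the substantive change in this file: the empty `outputSchema` is dropped from the `start-research` registration, so the tool result no longer advertises `structuredContent` and the brief must be read from the first text block. A minimal consumer-side sketch, assuming the standard `@modelcontextprotocol/sdk` client and a locally running server (the `http://localhost:3000/mcp` endpoint is an assumption, not something this diff documents):

```typescript
// Hypothetical consumer check for 6.0.15: start-research returns pure
// markdown in content[0].text; there is no structuredContent to parse.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";

async function fetchResearchBrief(goal: string): Promise<string> {
  const client = new Client({ name: "example-agent", version: "0.0.0" });
  await client.connect(
    new StreamableHTTPClientTransport(new URL("http://localhost:3000/mcp")), // assumed endpoint
  );
  const result = await client.callTool({ name: "start-research", arguments: { goal } });
  const blocks = (result.content ?? []) as Array<{ type: string; text?: string }>;
  const first = blocks[0];
  if (first && first.type === "text") return first.text ?? "";
  return "";
}
```

The calling convention is otherwise unchanged: `schema` still validates `goal` and `include_playbook`.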
package/dist/tools/start-research.js.map CHANGED
@@ -1,7 +1,7 @@
  {
  "version": 3,
  "sources": ["../../../src/tools/start-research.ts"],
- "sourcesContent": ["import type { MCPServer } from 'mcp-use/server';\n\nimport {\n startResearchOutputSchema,\n startResearchParamsSchema,\n type StartResearchOutput,\n type StartResearchParams,\n} from '../schemas/start-research.js';\nimport {\n createLLMProcessor,\n generateResearchBrief,\n getLLMHealth,\n renderResearchBrief,\n type LLMHealthSnapshot,\n} from '../services/llm-processor.js';\nimport { classifyError } from '../utils/errors.js';\nimport { mcpLog } from '../utils/logger.js';\nimport { toolFailure, toolSuccess, toToolResponse, type ToolExecutionResult } from './mcp-helpers.js';\nimport { formatError } from './utils.js';\n\nconst SKILL_INSTALL_HINT = [\n '> \uD83D\uDCA1 **Pair this server with the `run-research` skill** for the full agentic playbook',\n '> (single-agent loop, multi-agent orchestrator, mission-prompt templates, output discipline).',\n '> Install once per machine \u2014 the skill is what teaches the agent how to spend these tools well:',\n '>',\n '> ```bash',\n '> npx -y skills add -y -g https://github.com/yigitkonur/skills-by-yigitkonur --skill /run-research',\n '> ```',\n '>',\n '> Already installed? Skip this \u2014 the skill auto-loads on relevant prompts. The full pack',\n '> ships ~50 sibling skills: `npx -y skills add -y -g https://github.com/yigitkonur/skills-by-yigitkonur`.',\n].join('\\n');\n\n/**\n * Full research-loop playbook. Teaches the 3-tool mental model\n * (start-research, web-search, scrape-links), the aggressive multi-call\n * discipline, parallel-callability, and the cite-from-scrape rule.\n *\n * Emitted when the LLM planner is healthy OR `include_playbook: true`.\n */\nexport function buildStaticScaffolding(goal?: string, opts: { plannerAvailable?: boolean } = {}): string {\n const plannerAvailable = opts.plannerAvailable ?? true;\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not yet specified \u2014 set one on the next pass';\n\n const classifierLoopStep = plannerAvailable\n ? '3. Read the classifier output: `synthesis` (citations in `[rank]`), `gaps[]` (with ids), `refine_queries[]` (follow-ups tied to gap ids).'\n : '3. Classifier output is NOT available (LLM planner offline). `web-search` returns a raw ranked list \u2014 synthesize the terrain yourself from titles + snippets.';\n\n return [\n SKILL_INSTALL_HINT,\n '',\n '# Research session started',\n '',\n focusLine,\n '',\n 'You are running a research LOOP, not answering from memory. Training data is stale; the web is authoritative for anything dated, versioned, priced, or contested. Every non-trivial claim in your final answer must be traceable to a `scrape-links` excerpt you read. Never cite a URL from a `web-search` snippet alone.',\n '',\n '## The 3 tools',\n '',\n '**1. `start-research`** \u2014 you just called me. I plan this session and return the brief below. Call me again only if the goal materially shifts.',\n '',\n '**2. `web-search`** \u2014 fan out Google queries in parallel. One call carries **up to 50 queries** in a flat `queries` array. Call me **aggressively** \u2014 2\u20134 rounds per session is normal, not 1. After each pass, read `gaps[]` and `refine_queries[]` and fire another round with the harvested terms. **Parallel-safe**: run multiple `web-search` calls in the same turn for orthogonal subtopics (e.g. one call for \"spec\" queries, one call for \"sentiment\" queries). `scope` values:',\n '- `\"reddit\"` \u2192 server appends `site:reddit.com` and filters to post permalinks. 
Use for sentiment / migration / lived experience.',\n '- `\"web\"` (default) \u2192 open web. Use for spec / bug / pricing / CVE / API / primary-source hunts.',\n '- `\"both\"` \u2192 fans each query across both. Use when the topic is opinion-heavy AND needs official sources.',\n '',\n '**3. `scrape-links`** \u2014 fetch URLs in parallel and run per-URL LLM extraction. **Auto-detects** `reddit.com/r/.../comments/` permalinks and routes them through the Reddit API (threaded post + comments); everything else flows through the HTTP scraper. Mix Reddit + web URLs freely \u2014 both branches run concurrently. **Parallel-safe**: prefer multiple `scrape-links` calls with contextually grouped URLs over one giant mixed batch. Each page returns `## Source`, `## Matches` (verbatim facts), `## Not found` (explicit gaps this page did NOT answer), `## Follow-up signals` (new terms + referenced-but-unscraped URLs that seed your next `web-search` round). Describe extraction SHAPE in `extract`, facets separated by `|`: `root cause | affected versions | fix | workarounds | timeline`.',\n '',\n '## The loop',\n '',\n '1. Read the brief below (if present). Note `primary_branch`, `keyword_seeds`, `gaps_to_watch`, `stop_criteria`.',\n '2. Fire `first_call_sequence` in order. For `primary_branch: reddit`, lead with `web-search scope:\"reddit\"` \u2192 `scrape-links` on the best post permalinks. For `web`, lead with `web-search scope:\"web\"` \u2192 `scrape-links` on HIGHLY_RELEVANT URLs. For `both`, issue two parallel `web-search` calls (one per scope) in the same turn, then one merged `scrape-links`.',\n classifierLoopStep,\n '4. Scrape every HIGHLY_RELEVANT plus the 2\u20133 best MAYBE_RELEVANT. Group URLs into parallel `scrape-links` calls when contexts differ (e.g. one call for docs, one for reddit threads).',\n '5. Harvest from each scrape extract\\'s `## Follow-up signals` \u2014 new terms, version numbers, vendor names, failure modes, referenced URLs. These seed your next `web-search` round.',\n '6. Fire the next `web-search` round with the harvested terms plus any `refine_queries[]` the classifier suggested. Do NOT paraphrase queries already run \u2014 the classifier tracks them.',\n '7. **Stop** when every `gaps_to_watch` item is closed AND the last `web-search` pass surfaced no new terms, OR when you have completed 4 full passes. State remaining gaps explicitly if you hit the cap.',\n '',\n '## Output discipline',\n '',\n '- Cite URL (or Reddit permalink) for every non-trivial claim \u2014 only from a `scrape-links` excerpt you read.',\n '- Quote verbatim: numbers, versions, API names, prices, error messages, stacktraces, people\\'s words.',\n '- Separate documented facts from inferred conclusions explicitly.',\n '- Include the scrape date for time-sensitive claims.',\n '- If you could not verify something, say so \u2014 do not paper over gaps.',\n '- Never cite a URL from a search snippet alone.',\n '',\n '## Post-cutoff discipline',\n '',\n 'For anything released / changed after your training cutoff \u2014 new products, versions, prices, benchmarks \u2014 treat your own query suggestions as hypotheses until a scraped first-party page confirms them. Include `site:<vendor-domain>` queries in your first `web-search` call when the goal names a vendor or product.',\n ].join('\\n');\n}\n\n/**\n * Compact stub emitted when the LLM planner is offline AND the caller did\n * not opt into the full playbook. 
Names the 3 tools, the loop, parallel-safety,\n * Reddit branch, and cite-from-scrape \u2014 enough to keep an agent moving.\n */\nexport function buildDegradedStub(goal?: string): string {\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not specified \u2014 set one on the next pass.';\n return [\n SKILL_INSTALL_HINT,\n '',\n '# Research session started (LLM planner offline \u2014 compact stub)',\n '',\n focusLine,\n '',\n '**3 tools**: `start-research` (plans), `web-search` (Google fan-out, up to 50 queries/call, `scope: web|reddit|both`), `scrape-links` (fetch URLs in parallel, auto-detects `reddit.com/r/.../comments/` permalinks \u2192 Reddit API; all other URLs \u2192 HTTP scraper). All three are **parallel-callable** \u2014 fire multiple in the same turn when subtopics are orthogonal.',\n '',\n '**Loop**: `web-search` \u2192 `scrape-links` \u2192 read `## Follow-up signals` \u2192 harvest new terms \u2192 next `web-search` round \u2192 stop when gaps close OR after 4 passes. Call `web-search` aggressively (2\u20134 rounds, not 1).',\n '',\n '**Reddit branch**: use `web-search scope:\"reddit\"` for sentiment / migration / lived experience. Skip for CVE / API spec / pricing. Reddit permalinks go straight into `scrape-links` for threaded post + comments.',\n '',\n '**Cite**: every non-trivial claim must trace to a `scrape-links` excerpt, never a search snippet. Quote verbatim for numbers, versions, stacktraces, people\\'s words.',\n '',\n 'Pass `include_playbook: true` to `start-research` for the full tactic reference.',\n ].join('\\n');\n}\n\n/**\n * Backward-compat alias \u2014 older call sites import `buildOrientation` directly.\n */\nexport const buildOrientation = buildStaticScaffolding;\n\n// ============================================================================\n// Planner-offline gate.\n//\n// The problem we are guarding against: a single transient LLM failure (one bad\n// 429, one malformed JSON response from the classifier) used to poison the\n// gate forever and force every subsequent `start-research` call into the\n// compact stub \u2014 even when env was fine and the next call would have\n// succeeded. That created a deadlock where the very tool that could reset\n// the health flag was the tool being blocked.\n//\n// The safer semantics implemented here:\n// 1. If env is not configured, we are offline. Hard stop.\n// 2. Otherwise, require **two consecutive failures** before gating (one\n// blip is tolerated).\n// 3. Even then, the gate only holds for PLANNER_FAILURE_TTL_MS after the\n// most recent failure. After that window we give the planner another\n// chance regardless of the counter \u2014 if it is still broken the next\n// call's failure will re-arm the gate.\n// 4. Any success resets the counter to 0, so the gate opens immediately\n// on recovery.\n// ============================================================================\n\n/** Minimum consecutive failures before the gate closes. */\nexport const PLANNER_FAILURE_THRESHOLD = 2;\n\n/** How long a recent failure burst keeps the gate closed, in ms. */\nexport const PLANNER_FAILURE_TTL_MS = 60_000;\n\ntype PlannerGateHealth = Pick<\n LLMHealthSnapshot,\n 'plannerConfigured' | 'consecutivePlannerFailures' | 'lastPlannerCheckedAt'\n>;\n\n/**\n * Pure predicate \u2014 returns true when the planner should be treated as\n * offline for the purposes of `start-research`. 
Kept exported and\n * dependency-free so tests can drive it without touching the LLM.\n */\nexport function isPlannerKnownOffline(\n health: PlannerGateHealth,\n nowMs: number = Date.now(),\n): boolean {\n if (!health.plannerConfigured) {\n return true;\n }\n if (health.consecutivePlannerFailures < PLANNER_FAILURE_THRESHOLD) {\n return false;\n }\n if (health.lastPlannerCheckedAt === null) {\n return false;\n }\n const lastMs = Date.parse(health.lastPlannerCheckedAt);\n if (Number.isNaN(lastMs)) {\n return false;\n }\n return nowMs - lastMs < PLANNER_FAILURE_TTL_MS;\n}\n\nasync function buildGoalAwareBrief(\n goal: string,\n signal?: AbortSignal,\n): Promise<string> {\n const processor = createLLMProcessor();\n if (!processor) {\n mcpLog('info', 'start-research: LLM unavailable, returning static orientation only', 'start-research');\n return '';\n }\n\n const brief = await generateResearchBrief(goal, processor, signal);\n if (!brief) {\n mcpLog('warning', 'start-research: brief generation failed, returning static orientation only', 'start-research');\n return '';\n }\n\n return renderResearchBrief(brief);\n}\n\nasync function handleStartResearch(\n params: StartResearchParams,\n signal?: AbortSignal,\n): Promise<ToolExecutionResult<StartResearchOutput>> {\n try {\n const llmHealth = getLLMHealth();\n const plannerKnownOffline = isPlannerKnownOffline(llmHealth);\n\n if (plannerKnownOffline && !params.include_playbook) {\n const stub = buildDegradedStub(params.goal);\n return toolSuccess(stub);\n }\n\n const scaffolding = buildStaticScaffolding(params.goal, {\n plannerAvailable: !plannerKnownOffline,\n });\n\n let brief = '';\n if (params.goal) {\n brief = await buildGoalAwareBrief(params.goal, signal);\n }\n\n const briefFallbackNote = params.goal && !brief\n ? '\\n\\n---\\n\\n> _Goal-tailored brief unavailable: LLM planner is not configured or failed this call. The static playbook above still applies; you can proceed with it, or retry `start-research` after verifying `LLM_API_KEY`._'\n : '';\n\n const content = brief\n ? `${scaffolding}\\n\\n---\\n\\n${brief}`\n : `${scaffolding}${briefFallbackNote}`;\n\n return toolSuccess(content);\n } catch (err: unknown) {\n const structuredError = classifyError(err);\n mcpLog('error', `start-research: ${structuredError.message}`, 'start-research');\n return toolFailure(\n formatError({\n code: structuredError.code,\n message: structuredError.message,\n retryable: structuredError.retryable,\n toolName: 'start-research',\n howToFix: ['Retry start-research. If the failure persists, verify LLM_API_KEY / LLM_BASE_URL / LLM_MODEL.'],\n }),\n );\n }\n}\n\nexport function registerStartResearchTool(server: MCPServer): void {\n server.tool(\n {\n name: 'start-research',\n title: 'Start Research Session',\n description:\n 'Call this FIRST every research session. Provide a `goal`; I return a goal-tailored brief naming (a) `primary_branch` (reddit for sentiment/migration, web for spec/bug/pricing, both when opinion-heavy AND needs official sources), (b) the exact `first_call_sequence` of web-search + scrape-links calls to fire, (c) 25\u201350 keyword seeds for your first `web-search` call, (d) iteration hints, (e) gaps to watch, (f) stop criteria. No goal? You still get the generic 3-tool playbook. 
Other tools work without calling this, but you will use them worse.',\n schema: startResearchParamsSchema,\n outputSchema: startResearchOutputSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: false,\n },\n },\n async (args) => toToolResponse(await handleStartResearch(args)),\n );\n}\n"],
- "mappings": "AAEA;AAAA,EACE;AAAA,EACA;AAAA,OAGK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAEK;AACP,SAAS,qBAAqB;AAC9B,SAAS,cAAc;AACvB,SAAS,aAAa,aAAa,sBAAgD;AACnF,SAAS,mBAAmB;AAE5B,MAAM,qBAAqB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,EAAE,KAAK,IAAI;AASJ,SAAS,uBAAuB,MAAe,OAAuC,CAAC,GAAW;AACvG,QAAM,mBAAmB,KAAK,oBAAoB;AAClD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AAEJ,QAAM,qBAAqB,mBACvB,8IACA;AAEJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOO,SAAS,kBAAkB,MAAuB;AACvD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AACJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAKO,MAAM,mBAAmB;AAyBzB,MAAM,4BAA4B;AAGlC,MAAM,yBAAyB;AAY/B,SAAS,sBACd,QACA,QAAgB,KAAK,IAAI,GAChB;AACT,MAAI,CAAC,OAAO,mBAAmB;AAC7B,WAAO;AAAA,EACT;AACA,MAAI,OAAO,6BAA6B,2BAA2B;AACjE,WAAO;AAAA,EACT;AACA,MAAI,OAAO,yBAAyB,MAAM;AACxC,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,MAAM,OAAO,oBAAoB;AACrD,MAAI,OAAO,MAAM,MAAM,GAAG;AACxB,WAAO;AAAA,EACT;AACA,SAAO,QAAQ,SAAS;AAC1B;AAEA,eAAe,oBACb,MACA,QACiB;AACjB,QAAM,YAAY,mBAAmB;AACrC,MAAI,CAAC,WAAW;AACd,WAAO,QAAQ,sEAAsE,gBAAgB;AACrG,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,MAAM,sBAAsB,MAAM,WAAW,MAAM;AACjE,MAAI,CAAC,OAAO;AACV,WAAO,WAAW,8EAA8E,gBAAgB;AAChH,WAAO;AAAA,EACT;AAEA,SAAO,oBAAoB,KAAK;AAClC;AAEA,eAAe,oBACb,QACA,QACmD;AACnD,MAAI;AACF,UAAM,YAAY,aAAa;AAC/B,UAAM,sBAAsB,sBAAsB,SAAS;AAE3D,QAAI,uBAAuB,CAAC,OAAO,kBAAkB;AACnD,YAAM,OAAO,kBAAkB,OAAO,IAAI;AAC1C,aAAO,YAAY,IAAI;AAAA,IACzB;AAEA,UAAM,cAAc,uBAAuB,OAAO,MAAM;AAAA,MACtD,kBAAkB,CAAC;AAAA,IACrB,CAAC;AAED,QAAI,QAAQ;AACZ,QAAI,OAAO,MAAM;AACf,cAAQ,MAAM,oBAAoB,OAAO,MAAM,MAAM;AAAA,IACvD;AAEA,UAAM,oBAAoB,OAAO,QAAQ,CAAC,QACtC,kOACA;AAEJ,UAAM,UAAU,QACZ,GAAG,WAAW;AAAA;AAAA;AAAA;AAAA,EAAc,KAAK,KACjC,GAAG,WAAW,GAAG,iBAAiB;AAEtC,WAAO,YAAY,OAAO;AAAA,EAC5B,SAAS,KAAc;AACrB,UAAM,kBAAkB,cAAc,GAAG;AACzC,WAAO,SAAS,mBAAmB,gBAAgB,OAAO,IAAI,gBAAgB;AAC9E,WAAO;AAAA,MACL,YAAY;AAAA,QACV,MAAM,gBAAgB;AAAA,QACtB,SAAS,gBAAgB;AAAA,QACzB,WAAW,gBAAgB;AAAA,QAC3B,UAAU;AAAA,QACV,UAAU,CAAC,+FAA+F;AAAA,MAC5G,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAEO,SAAS,0BAA0B,QAAyB;AACjE,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,cAAc;AAAA,MACd,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,IACF;AAAA,IACA,OAAO,SAAS,eAAe,MAAM,oBAAoB,IAAI,CAAC;AAAA,EAChE;AACF;",
+ "sourcesContent": ["import type { MCPServer } from 'mcp-use/server';\n\nimport {\n startResearchParamsSchema,\n type StartResearchOutput,\n type StartResearchParams,\n} from '../schemas/start-research.js';\nimport {\n createLLMProcessor,\n generateResearchBrief,\n getLLMHealth,\n renderResearchBrief,\n type LLMHealthSnapshot,\n} from '../services/llm-processor.js';\nimport { classifyError } from '../utils/errors.js';\nimport { mcpLog } from '../utils/logger.js';\nimport { toolFailure, toolSuccess, toToolResponse, type ToolExecutionResult } from './mcp-helpers.js';\nimport { formatError } from './utils.js';\n\nconst SKILL_INSTALL_HINT = [\n '> \uD83D\uDCA1 **Pair this server with the `run-research` skill** for the full agentic playbook',\n '> (single-agent loop, multi-agent orchestrator, mission-prompt templates, output discipline).',\n '> Install once per machine \u2014 the skill is what teaches the agent how to spend these tools well:',\n '>',\n '> ```bash',\n '> npx -y skills add -y -g https://github.com/yigitkonur/skills-by-yigitkonur --skill /run-research',\n '> ```',\n '>',\n '> Already installed? Skip this \u2014 the skill auto-loads on relevant prompts. The full pack',\n '> ships ~50 sibling skills: `npx -y skills add -y -g https://github.com/yigitkonur/skills-by-yigitkonur`.',\n].join('\\n');\n\n/**\n * Full research-loop playbook. Teaches the 3-tool mental model\n * (start-research, web-search, scrape-links), the aggressive multi-call\n * discipline, parallel-callability, and the cite-from-scrape rule.\n *\n * Emitted when the LLM planner is healthy OR `include_playbook: true`.\n */\nexport function buildStaticScaffolding(goal?: string, opts: { plannerAvailable?: boolean } = {}): string {\n const plannerAvailable = opts.plannerAvailable ?? true;\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not yet specified \u2014 set one on the next pass';\n\n const classifierLoopStep = plannerAvailable\n ? '3. Read the classifier output: `synthesis` (citations in `[rank]`), `gaps[]` (with ids), `refine_queries[]` (follow-ups tied to gap ids).'\n : '3. Classifier output is NOT available (LLM planner offline). `web-search` returns a raw ranked list \u2014 synthesize the terrain yourself from titles + snippets.';\n\n return [\n SKILL_INSTALL_HINT,\n '',\n '# Research session started',\n '',\n focusLine,\n '',\n 'You are running a research LOOP, not answering from memory. Training data is stale; the web is authoritative for anything dated, versioned, priced, or contested. Every non-trivial claim in your final answer must be traceable to a `scrape-links` excerpt you read. Never cite a URL from a `web-search` snippet alone.',\n '',\n '## The 3 tools',\n '',\n '**1. `start-research`** \u2014 you just called me. I plan this session and return the brief below. Call me again only if the goal materially shifts.',\n '',\n '**2. `web-search`** \u2014 fan out Google queries in parallel. One call carries **up to 50 queries** in a flat `queries` array. Call me **aggressively** \u2014 2\u20134 rounds per session is normal, not 1. After each pass, read `gaps[]` and `refine_queries[]` and fire another round with the harvested terms. **Parallel-safe**: run multiple `web-search` calls in the same turn for orthogonal subtopics (e.g. one call for \"spec\" queries, one call for \"sentiment\" queries). `scope` values:',\n '- `\"reddit\"` \u2192 server appends `site:reddit.com` and filters to post permalinks. 
Use for sentiment / migration / lived experience.',\n '- `\"web\"` (default) \u2192 open web. Use for spec / bug / pricing / CVE / API / primary-source hunts.',\n '- `\"both\"` \u2192 fans each query across both. Use when the topic is opinion-heavy AND needs official sources.',\n '',\n '**3. `scrape-links`** \u2014 fetch URLs in parallel and run per-URL LLM extraction. **Auto-detects** `reddit.com/r/.../comments/` permalinks and routes them through the Reddit API (threaded post + comments); everything else flows through the HTTP scraper. Mix Reddit + web URLs freely \u2014 both branches run concurrently. **Parallel-safe**: prefer multiple `scrape-links` calls with contextually grouped URLs over one giant mixed batch. Each page returns `## Source`, `## Matches` (verbatim facts), `## Not found` (explicit gaps this page did NOT answer), `## Follow-up signals` (new terms + referenced-but-unscraped URLs that seed your next `web-search` round). Describe extraction SHAPE in `extract`, facets separated by `|`: `root cause | affected versions | fix | workarounds | timeline`.',\n '',\n '## The loop',\n '',\n '1. Read the brief below (if present). Note `primary_branch`, `keyword_seeds`, `gaps_to_watch`, `stop_criteria`.',\n '2. Fire `first_call_sequence` in order. For `primary_branch: reddit`, lead with `web-search scope:\"reddit\"` \u2192 `scrape-links` on the best post permalinks. For `web`, lead with `web-search scope:\"web\"` \u2192 `scrape-links` on HIGHLY_RELEVANT URLs. For `both`, issue two parallel `web-search` calls (one per scope) in the same turn, then one merged `scrape-links`.',\n classifierLoopStep,\n '4. Scrape every HIGHLY_RELEVANT plus the 2\u20133 best MAYBE_RELEVANT. Group URLs into parallel `scrape-links` calls when contexts differ (e.g. one call for docs, one for reddit threads).',\n '5. Harvest from each scrape extract\\'s `## Follow-up signals` \u2014 new terms, version numbers, vendor names, failure modes, referenced URLs. These seed your next `web-search` round.',\n '6. Fire the next `web-search` round with the harvested terms plus any `refine_queries[]` the classifier suggested. Do NOT paraphrase queries already run \u2014 the classifier tracks them.',\n '7. **Stop** when every `gaps_to_watch` item is closed AND the last `web-search` pass surfaced no new terms, OR when you have completed 4 full passes. State remaining gaps explicitly if you hit the cap.',\n '',\n '## Output discipline',\n '',\n '- Cite URL (or Reddit permalink) for every non-trivial claim \u2014 only from a `scrape-links` excerpt you read.',\n '- Quote verbatim: numbers, versions, API names, prices, error messages, stacktraces, people\\'s words.',\n '- Separate documented facts from inferred conclusions explicitly.',\n '- Include the scrape date for time-sensitive claims.',\n '- If you could not verify something, say so \u2014 do not paper over gaps.',\n '- Never cite a URL from a search snippet alone.',\n '',\n '## Post-cutoff discipline',\n '',\n 'For anything released / changed after your training cutoff \u2014 new products, versions, prices, benchmarks \u2014 treat your own query suggestions as hypotheses until a scraped first-party page confirms them. Include `site:<vendor-domain>` queries in your first `web-search` call when the goal names a vendor or product.',\n ].join('\\n');\n}\n\n/**\n * Compact stub emitted when the LLM planner is offline AND the caller did\n * not opt into the full playbook. 
Names the 3 tools, the loop, parallel-safety,\n * Reddit branch, and cite-from-scrape \u2014 enough to keep an agent moving.\n */\nexport function buildDegradedStub(goal?: string): string {\n const focusLine = goal\n ? `> Focus for this session: ${goal}`\n : '> Focus for this session: not specified \u2014 set one on the next pass.';\n return [\n SKILL_INSTALL_HINT,\n '',\n '# Research session started (LLM planner offline \u2014 compact stub)',\n '',\n focusLine,\n '',\n '**3 tools**: `start-research` (plans), `web-search` (Google fan-out, up to 50 queries/call, `scope: web|reddit|both`), `scrape-links` (fetch URLs in parallel, auto-detects `reddit.com/r/.../comments/` permalinks \u2192 Reddit API; all other URLs \u2192 HTTP scraper). All three are **parallel-callable** \u2014 fire multiple in the same turn when subtopics are orthogonal.',\n '',\n '**Loop**: `web-search` \u2192 `scrape-links` \u2192 read `## Follow-up signals` \u2192 harvest new terms \u2192 next `web-search` round \u2192 stop when gaps close OR after 4 passes. Call `web-search` aggressively (2\u20134 rounds, not 1).',\n '',\n '**Reddit branch**: use `web-search scope:\"reddit\"` for sentiment / migration / lived experience. Skip for CVE / API spec / pricing. Reddit permalinks go straight into `scrape-links` for threaded post + comments.',\n '',\n '**Cite**: every non-trivial claim must trace to a `scrape-links` excerpt, never a search snippet. Quote verbatim for numbers, versions, stacktraces, people\\'s words.',\n '',\n 'Pass `include_playbook: true` to `start-research` for the full tactic reference.',\n ].join('\\n');\n}\n\n/**\n * Backward-compat alias \u2014 older call sites import `buildOrientation` directly.\n */\nexport const buildOrientation = buildStaticScaffolding;\n\n// ============================================================================\n// Planner-offline gate.\n//\n// The problem we are guarding against: a single transient LLM failure (one bad\n// 429, one malformed JSON response from the classifier) used to poison the\n// gate forever and force every subsequent `start-research` call into the\n// compact stub \u2014 even when env was fine and the next call would have\n// succeeded. That created a deadlock where the very tool that could reset\n// the health flag was the tool being blocked.\n//\n// The safer semantics implemented here:\n// 1. If env is not configured, we are offline. Hard stop.\n// 2. Otherwise, require **two consecutive failures** before gating (one\n// blip is tolerated).\n// 3. Even then, the gate only holds for PLANNER_FAILURE_TTL_MS after the\n// most recent failure. After that window we give the planner another\n// chance regardless of the counter \u2014 if it is still broken the next\n// call's failure will re-arm the gate.\n// 4. Any success resets the counter to 0, so the gate opens immediately\n// on recovery.\n// ============================================================================\n\n/** Minimum consecutive failures before the gate closes. */\nexport const PLANNER_FAILURE_THRESHOLD = 2;\n\n/** How long a recent failure burst keeps the gate closed, in ms. */\nexport const PLANNER_FAILURE_TTL_MS = 60_000;\n\ntype PlannerGateHealth = Pick<\n LLMHealthSnapshot,\n 'plannerConfigured' | 'consecutivePlannerFailures' | 'lastPlannerCheckedAt'\n>;\n\n/**\n * Pure predicate \u2014 returns true when the planner should be treated as\n * offline for the purposes of `start-research`. 
Kept exported and\n * dependency-free so tests can drive it without touching the LLM.\n */\nexport function isPlannerKnownOffline(\n health: PlannerGateHealth,\n nowMs: number = Date.now(),\n): boolean {\n if (!health.plannerConfigured) {\n return true;\n }\n if (health.consecutivePlannerFailures < PLANNER_FAILURE_THRESHOLD) {\n return false;\n }\n if (health.lastPlannerCheckedAt === null) {\n return false;\n }\n const lastMs = Date.parse(health.lastPlannerCheckedAt);\n if (Number.isNaN(lastMs)) {\n return false;\n }\n return nowMs - lastMs < PLANNER_FAILURE_TTL_MS;\n}\n\nasync function buildGoalAwareBrief(\n goal: string,\n signal?: AbortSignal,\n): Promise<string> {\n const processor = createLLMProcessor();\n if (!processor) {\n mcpLog('info', 'start-research: LLM unavailable, returning static orientation only', 'start-research');\n return '';\n }\n\n const brief = await generateResearchBrief(goal, processor, signal);\n if (!brief) {\n mcpLog('warning', 'start-research: brief generation failed, returning static orientation only', 'start-research');\n return '';\n }\n\n return renderResearchBrief(brief);\n}\n\nasync function handleStartResearch(\n params: StartResearchParams,\n signal?: AbortSignal,\n): Promise<ToolExecutionResult<StartResearchOutput>> {\n try {\n const llmHealth = getLLMHealth();\n const plannerKnownOffline = isPlannerKnownOffline(llmHealth);\n\n if (plannerKnownOffline && !params.include_playbook) {\n const stub = buildDegradedStub(params.goal);\n return toolSuccess(stub);\n }\n\n const scaffolding = buildStaticScaffolding(params.goal, {\n plannerAvailable: !plannerKnownOffline,\n });\n\n let brief = '';\n if (params.goal) {\n brief = await buildGoalAwareBrief(params.goal, signal);\n }\n\n const briefFallbackNote = params.goal && !brief\n ? '\\n\\n---\\n\\n> _Goal-tailored brief unavailable: LLM planner is not configured or failed this call. The static playbook above still applies; you can proceed with it, or retry `start-research` after verifying `LLM_API_KEY`._'\n : '';\n\n const content = brief\n ? `${scaffolding}\\n\\n---\\n\\n${brief}`\n : `${scaffolding}${briefFallbackNote}`;\n\n return toolSuccess(content);\n } catch (err: unknown) {\n const structuredError = classifyError(err);\n mcpLog('error', `start-research: ${structuredError.message}`, 'start-research');\n return toolFailure(\n formatError({\n code: structuredError.code,\n message: structuredError.message,\n retryable: structuredError.retryable,\n toolName: 'start-research',\n howToFix: ['Retry start-research. If the failure persists, verify LLM_API_KEY / LLM_BASE_URL / LLM_MODEL.'],\n }),\n );\n }\n}\n\nexport function registerStartResearchTool(server: MCPServer): void {\n server.tool(\n {\n name: 'start-research',\n title: 'Start Research Session',\n description:\n 'Call this FIRST every research session. Provide a `goal`; I return a goal-tailored brief naming (a) `primary_branch` (reddit for sentiment/migration, web for spec/bug/pricing, both when opinion-heavy AND needs official sources), (b) the exact `first_call_sequence` of web-search + scrape-links calls to fire, (c) 25\u201350 keyword seeds for your first `web-search` call, (d) iteration hints, (e) gaps to watch, (f) stop criteria. No goal? You still get the generic 3-tool playbook. 
Other tools work without calling this, but you will use them worse.',\n schema: startResearchParamsSchema,\n annotations: {\n readOnlyHint: true,\n idempotentHint: true,\n destructiveHint: false,\n openWorldHint: false,\n },\n },\n async (args) => toToolResponse(await handleStartResearch(args)),\n );\n}\n"],
+ "mappings": "AAEA;AAAA,EACE;AAAA,OAGK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OAEK;AACP,SAAS,qBAAqB;AAC9B,SAAS,cAAc;AACvB,SAAS,aAAa,aAAa,sBAAgD;AACnF,SAAS,mBAAmB;AAE5B,MAAM,qBAAqB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,EAAE,KAAK,IAAI;AASJ,SAAS,uBAAuB,MAAe,OAAuC,CAAC,GAAW;AACvG,QAAM,mBAAmB,KAAK,oBAAoB;AAClD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AAEJ,QAAM,qBAAqB,mBACvB,8IACA;AAEJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOO,SAAS,kBAAkB,MAAuB;AACvD,QAAM,YAAY,OACd,6BAA6B,IAAI,KACjC;AACJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAKO,MAAM,mBAAmB;AAyBzB,MAAM,4BAA4B;AAGlC,MAAM,yBAAyB;AAY/B,SAAS,sBACd,QACA,QAAgB,KAAK,IAAI,GAChB;AACT,MAAI,CAAC,OAAO,mBAAmB;AAC7B,WAAO;AAAA,EACT;AACA,MAAI,OAAO,6BAA6B,2BAA2B;AACjE,WAAO;AAAA,EACT;AACA,MAAI,OAAO,yBAAyB,MAAM;AACxC,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,MAAM,OAAO,oBAAoB;AACrD,MAAI,OAAO,MAAM,MAAM,GAAG;AACxB,WAAO;AAAA,EACT;AACA,SAAO,QAAQ,SAAS;AAC1B;AAEA,eAAe,oBACb,MACA,QACiB;AACjB,QAAM,YAAY,mBAAmB;AACrC,MAAI,CAAC,WAAW;AACd,WAAO,QAAQ,sEAAsE,gBAAgB;AACrG,WAAO;AAAA,EACT;AAEA,QAAM,QAAQ,MAAM,sBAAsB,MAAM,WAAW,MAAM;AACjE,MAAI,CAAC,OAAO;AACV,WAAO,WAAW,8EAA8E,gBAAgB;AAChH,WAAO;AAAA,EACT;AAEA,SAAO,oBAAoB,KAAK;AAClC;AAEA,eAAe,oBACb,QACA,QACmD;AACnD,MAAI;AACF,UAAM,YAAY,aAAa;AAC/B,UAAM,sBAAsB,sBAAsB,SAAS;AAE3D,QAAI,uBAAuB,CAAC,OAAO,kBAAkB;AACnD,YAAM,OAAO,kBAAkB,OAAO,IAAI;AAC1C,aAAO,YAAY,IAAI;AAAA,IACzB;AAEA,UAAM,cAAc,uBAAuB,OAAO,MAAM;AAAA,MACtD,kBAAkB,CAAC;AAAA,IACrB,CAAC;AAED,QAAI,QAAQ;AACZ,QAAI,OAAO,MAAM;AACf,cAAQ,MAAM,oBAAoB,OAAO,MAAM,MAAM;AAAA,IACvD;AAEA,UAAM,oBAAoB,OAAO,QAAQ,CAAC,QACtC,kOACA;AAEJ,UAAM,UAAU,QACZ,GAAG,WAAW;AAAA;AAAA;AAAA;AAAA,EAAc,KAAK,KACjC,GAAG,WAAW,GAAG,iBAAiB;AAEtC,WAAO,YAAY,OAAO;AAAA,EAC5B,SAAS,KAAc;AACrB,UAAM,kBAAkB,cAAc,GAAG;AACzC,WAAO,SAAS,mBAAmB,gBAAgB,OAAO,IAAI,gBAAgB;AAC9E,WAAO;AAAA,MACL,YAAY;AAAA,QACV,MAAM,gBAAgB;AAAA,QACtB,SAAS,gBAAgB;AAAA,QACzB,WAAW,gBAAgB;AAAA,QAC3B,UAAU;AAAA,QACV,UAAU,CAAC,+FAA+F;AAAA,MAC5G,CAAC;AAAA,IACH;AAAA,EACF;AACF;AAEO,SAAS,0BAA0B,QAAyB;AACjE,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aACE;AAAA,MACF,QAAQ;AAAA,MACR,aAAa;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,iBAAiB;AAAA,QACjB,eAAe;AAAA,MACjB;AAAA,IACF;AAAA,IACA,OAAO,SAAS,eAAe,MAAM,oBAAoB,IAAI,CAAC;AAAA,EAChE;AACF;",
  "names": []
  }
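
Harder to see in the minified mappings: the rewritten module keeps the planner-offline gate spelled out in the `sourcesContent` above (`isPlannerKnownOffline`, with `PLANNER_FAILURE_THRESHOLD = 2` and `PLANNER_FAILURE_TTL_MS = 60_000`). A self-contained sketch restating those semantics, re-implemented here rather than deep-imported from `dist/` (the deep import path is not a documented entry point of this package):

```typescript
// Sketch of the gate documented above: one failure is tolerated, two
// consecutive failures close the gate, and the gate re-opens on its own
// 60s after the most recent failure.
const THRESHOLD = 2;   // PLANNER_FAILURE_THRESHOLD in the source
const TTL_MS = 60_000; // PLANNER_FAILURE_TTL_MS in the source

interface GateHealth {
  plannerConfigured: boolean;
  consecutivePlannerFailures: number;
  lastPlannerCheckedAt: string | null; // ISO-8601 timestamp or null
}

function plannerKnownOffline(h: GateHealth, nowMs: number = Date.now()): boolean {
  if (!h.plannerConfigured) return true;                      // no env: hard stop
  if (h.consecutivePlannerFailures < THRESHOLD) return false; // a single blip is tolerated
  if (h.lastPlannerCheckedAt === null) return false;
  const lastMs = Date.parse(h.lastPlannerCheckedAt);
  if (Number.isNaN(lastMs)) return false;
  return nowMs - lastMs < TTL_MS;                             // the gate expires after the TTL
}

const now = Date.now();
const base = { plannerConfigured: true, lastPlannerCheckedAt: new Date(now).toISOString() };
console.log(plannerKnownOffline({ ...base, consecutivePlannerFailures: 1 }, now)); // false
console.log(plannerKnownOffline({ ...base, consecutivePlannerFailures: 2 }, now)); // true
console.log(plannerKnownOffline(
  { ...base, consecutivePlannerFailures: 2, lastPlannerCheckedAt: new Date(now - 120_000).toISOString() },
  now,
)); // false: the burst is stale, so the planner gets another chance
```

Per the comments in the source, any planner success resets the failure counter, so the gate also opens immediately on recovery.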
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "mcp-researchpowerpack",
- "version": "6.0.13",
+ "version": "6.0.15",
  "description": "HTTP-first MCP research server: start-research (goal-tailored brief), web-search (with Reddit scope), scrape-links (auto-detects Reddit URLs) — built on mcp-use.",
  "type": "module",
  "main": "dist/index.js",
package/dist/prompts/deep-research.js DELETED
@@ -1,45 +0,0 @@
- import { text } from "mcp-use/server";
- import { z } from "zod";
- function registerDeepResearchPrompt(server) {
- server.prompt(
- {
- name: "deep-research",
- title: "Deep Research",
- description: "Multi-pass research loop on a topic using the research-powerpack tools.",
- schema: z.object({
- topic: z.string().describe('Topic to research. Be specific about what "done" looks like \u2014 the first tool call will generate a goal-tailored research brief from it.')
- })
- },
- async ({ topic }) => text(
- [
- "You are a research agent using the research-powerpack MCP tools (3 tools: `start-research`, `web-search`, `scrape-links`). You are running a research LOOP, not answering from memory \u2014 every non-trivial claim in your final answer must be traceable to a `scrape-links` excerpt. Never cite a URL from a `web-search` snippet alone.",
- "",
- `Research goal: ${topic}`,
- "",
- "## Workflow",
- "",
- "1. **Call `start-research` with `goal` = the research goal above.** The server returns a goal-tailored brief: classified goal type, `primary_branch` (reddit / web / both), the exact `first_call_sequence`, 25\u201350 keyword seeds for your first `web-search` call, iteration hints, gaps to watch, and stop criteria.",
- "2. **Fire `first_call_sequence` in order.**",
- ' - `primary_branch: web` \u2192 one `web-search` (scope: "web") with all keyword seeds in a flat `queries` array, then one `scrape-links` on the HIGHLY_RELEVANT + 2\u20133 best MAYBE_RELEVANT URLs.',
- ' - `primary_branch: reddit` \u2192 one `web-search` (scope: "reddit") with the seeds, then one `scrape-links` on the best post permalinks (auto-detected \u2192 Reddit API threaded post + comments).',
- ' - `primary_branch: both` \u2192 two parallel `web-search` calls in one turn (scope: "web" + scope: "reddit"), then one merged `scrape-links`.',
- ' Set `extract` on `web-search` to a specific description of what "relevant" means for this goal (not just a keyword).',
- "3. **Read the classifier output**: `synthesis` (grounded in `[rank]` citations), `gaps` (each with an id), `refine_queries` (follow-ups linked to gap ids). If confidence is `low`, trust the `gaps` list more than the synthesis.",
- "4. **Read every scrape extract**. Each page returns `## Source`, `## Matches` (verbatim facts), `## Not found` (admitted gaps), `## Follow-up signals` (new terms + referenced-but-unscraped URLs). Harvest from `## Follow-up signals` \u2014 those terms seed your next `web-search` round.",
- "5. **Loop**: build the next `web-search` with the harvested terms + classifier-suggested refines. Scrape HIGHLY_RELEVANT URLs in contextually grouped parallel `scrape-links` calls (docs in one call, reddit threads in another). Stop when every `gaps_to_watch` item is closed AND no new terms appeared, OR after 4 passes \u2014 whichever comes first.",
- "",
- "## Output discipline",
- "",
- "- Cite URL (or Reddit permalink) for every non-trivial claim.",
- "- Quote verbatim: numbers, versions, API names, prices, error messages, stacktraces, people's words.",
- "- Separate documented facts from inferred conclusions explicitly.",
- "- Include scrape dates on time-sensitive claims.",
- "- If any `stop_criteria` item from the brief is unmet, say so \u2014 do not paper over it."
- ].join("\n")
- )
- );
- }
- export {
- registerDeepResearchPrompt
- };
- //# sourceMappingURL=deep-research.js.map
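
Both optional MCP prompts are deleted outright in this release, starting with `deep-research` above. A client that surfaced it via `prompts/get` can rebuild the mission text from the deleted source; a trimmed sketch follows (`buildDeepResearchMission` is a hypothetical helper, and the wording is abridged, so copy the full strings from the diff if you need identical output):

```typescript
// Hypothetical client-side replacement for the removed deep-research
// MCP prompt. Abridged from the deleted dist file above; not shipped
// by the package.
function buildDeepResearchMission(topic: string): string {
  return [
    "You are a research agent using the research-powerpack MCP tools" +
      " (start-research, web-search, scrape-links). You are running a" +
      " research LOOP, not answering from memory; every non-trivial claim" +
      " must be traceable to a scrape-links excerpt you read.",
    "",
    `Research goal: ${topic}`,
    "",
    "## Workflow",
    "1. Call start-research with goal = the research goal above.",
    "2. Fire the returned first_call_sequence in order.",
    "3. Read the classifier output: synthesis, gaps, refine_queries.",
    "4. Read every scrape extract and harvest ## Follow-up signals.",
    "5. Loop with the harvested terms; stop when gaps_to_watch close or after 4 passes.",
  ].join("\n");
}
```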
package/dist/prompts/deep-research.js.map DELETED
@@ -1,7 +0,0 @@
- {
- "version": 3,
- "sources": ["../../../src/prompts/deep-research.ts"],
- "sourcesContent": ["import { text, type MCPServer } from 'mcp-use/server';\nimport { z } from 'zod';\n\nexport function registerDeepResearchPrompt(server: MCPServer): void {\n server.prompt(\n {\n name: 'deep-research',\n title: 'Deep Research',\n description: 'Multi-pass research loop on a topic using the research-powerpack tools.',\n schema: z.object({\n topic: z.string().describe('Topic to research. Be specific about what \"done\" looks like \u2014 the first tool call will generate a goal-tailored research brief from it.'),\n }),\n },\n async ({ topic }) => text(\n [\n 'You are a research agent using the research-powerpack MCP tools (3 tools: `start-research`, `web-search`, `scrape-links`). You are running a research LOOP, not answering from memory \u2014 every non-trivial claim in your final answer must be traceable to a `scrape-links` excerpt. Never cite a URL from a `web-search` snippet alone.',\n '',\n `Research goal: ${topic}`,\n '',\n '## Workflow',\n '',\n '1. **Call `start-research` with `goal` = the research goal above.** The server returns a goal-tailored brief: classified goal type, `primary_branch` (reddit / web / both), the exact `first_call_sequence`, 25\u201350 keyword seeds for your first `web-search` call, iteration hints, gaps to watch, and stop criteria.',\n '2. **Fire `first_call_sequence` in order.**',\n ' - `primary_branch: web` \u2192 one `web-search` (scope: \"web\") with all keyword seeds in a flat `queries` array, then one `scrape-links` on the HIGHLY_RELEVANT + 2\u20133 best MAYBE_RELEVANT URLs.',\n ' - `primary_branch: reddit` \u2192 one `web-search` (scope: \"reddit\") with the seeds, then one `scrape-links` on the best post permalinks (auto-detected \u2192 Reddit API threaded post + comments).',\n ' - `primary_branch: both` \u2192 two parallel `web-search` calls in one turn (scope: \"web\" + scope: \"reddit\"), then one merged `scrape-links`.',\n ' Set `extract` on `web-search` to a specific description of what \"relevant\" means for this goal (not just a keyword).',\n '3. **Read the classifier output**: `synthesis` (grounded in `[rank]` citations), `gaps` (each with an id), `refine_queries` (follow-ups linked to gap ids). If confidence is `low`, trust the `gaps` list more than the synthesis.',\n '4. **Read every scrape extract**. Each page returns `## Source`, `## Matches` (verbatim facts), `## Not found` (admitted gaps), `## Follow-up signals` (new terms + referenced-but-unscraped URLs). Harvest from `## Follow-up signals` \u2014 those terms seed your next `web-search` round.',\n '5. **Loop**: build the next `web-search` with the harvested terms + classifier-suggested refines. Scrape HIGHLY_RELEVANT URLs in contextually grouped parallel `scrape-links` calls (docs in one call, reddit threads in another). Stop when every `gaps_to_watch` item is closed AND no new terms appeared, OR after 4 passes \u2014 whichever comes first.',\n '',\n '## Output discipline',\n '',\n '- Cite URL (or Reddit permalink) for every non-trivial claim.',\n '- Quote verbatim: numbers, versions, API names, prices, error messages, stacktraces, people\\'s words.',\n '- Separate documented facts from inferred conclusions explicitly.',\n '- Include scrape dates on time-sensitive claims.',\n '- If any `stop_criteria` item from the brief is unmet, say so \u2014 do not paper over it.',\n ].join('\\n'),\n ),\n );\n}\n"],
- "mappings": "AAAA,SAAS,YAA4B;AACrC,SAAS,SAAS;AAEX,SAAS,2BAA2B,QAAyB;AAClE,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aAAa;AAAA,MACb,QAAQ,EAAE,OAAO;AAAA,QACf,OAAO,EAAE,OAAO,EAAE,SAAS,8IAAyI;AAAA,MACtK,CAAC;AAAA,IACH;AAAA,IACA,OAAO,EAAE,MAAM,MAAM;AAAA,MACnB;AAAA,QACE;AAAA,QACA;AAAA,QACA,kBAAkB,KAAK;AAAA,QACvB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF,EAAE,KAAK,IAAI;AAAA,IACb;AAAA,EACF;AACF;",
- "names": []
- }
package/dist/prompts/reddit-sentiment.js DELETED
@@ -1,47 +0,0 @@
- import { text } from "mcp-use/server";
- import { z } from "zod";
- function registerRedditSentimentPrompt(server) {
- server.prompt(
- {
- name: "reddit-sentiment",
- title: "Reddit Sentiment",
- description: "Research Reddit sentiment for a topic using the research-powerpack tools \u2014 lived experience, migration stories, agreement/dissent distribution.",
- schema: z.object({
- topic: z.string().describe('Topic to evaluate. Phrase it as a sentiment question \u2014 "what developers actually think about X", "why teams moved from X to Y".'),
- subreddits: z.string().optional().describe('Optional comma-separated subreddit filters, e.g. "webdev,javascript".')
- })
- },
- async ({ topic, subreddits }) => {
- const subredditList = subreddits ? subreddits.split(",").map((value) => value.trim().replace(/^\/?r\//i, "")).filter(Boolean) : [];
- const subredditScope = subredditList.length ? ` Scope Reddit searches to ${subredditList.map((s) => `r/${s}`).join(", ")} when possible.` : "";
- return text(
- [
- "You are a research agent using the research-powerpack MCP tools (3 tools: `start-research`, `web-search`, `scrape-links`) to characterize Reddit sentiment. You are running a research LOOP, not answering from memory. Sentiment claims must be traceable to specific Reddit threads you expanded via `scrape-links` \u2014 never cite a thread you have not scraped.",
- "",
- `Research goal: Reddit sentiment on "${topic}" \u2014 agreement distribution, dissent distribution, representative verbatim quotes with attribution, and the strongest causal explanations.${subredditScope}`,
- "",
- "## Workflow",
- "",
- "1. **Call `start-research` with `goal` = the research goal above.** The brief will classify this as `sentiment`, set `primary_branch` to `reddit` (or `both` if official sources also matter), and list 25\u201350 seed queries ready for `web-search`.",
- '2. **Fire two parallel `web-search` calls in one turn** \u2014 one with `scope: "reddit"` for post-permalink discovery, one with `scope: "web"` for supporting evidence (post-mortems, blog write-ups, GitHub issues). Set `extract` to describe the shape of the sentiment answer: "agreement reasons | dissent reasons | representative quotes | migration drivers".',
- "3. **Shortlist the strongest Reddit threads** \u2014 those with (a) high comment count, (b) visible disagreement in replies, (c) specific stack/environment details from the OP. Avoid single-comment threads.",
- "4. **Fetch with `scrape-links`** \u2014 batch 3\u201310 reddit.com post permalinks in one call. `scrape-links` auto-detects `reddit.com/r/.../comments/` URLs and routes them through the Reddit API (threaded post + full comment tree). Read every comment tree end-to-end, not just the top-voted reply.",
- '5. **Scrape supporting evidence** with another `scrape-links` call (in parallel, different call from the reddit batch) \u2014 blog post-mortems, GitHub issues, HN discussions referenced in the threads. Use `extract` = "concrete reasons | stack details | version numbers | outcome". The extractor preserves verbatim quotes and surfaces referenced-but-unscraped URLs under `## Follow-up signals`.',
- '6. **Loop**: if the classifier flags gaps ("no dissent voices captured", "no migration timeline") or brief `gaps_to_watch` are unmet, build new queries and run another pass. Stop after 4 passes or when sentiment distribution stabilizes across two passes.',
- "",
- "## Output discipline",
- "",
- '- Report sentiment as a distribution ("~N of M replies agreed / ~K dissented / rest off-topic"), not a single mood label.',
- "- Cite every quote with the Reddit thread permalink plus `u/username` attribution.",
- "- Separate OP claims from reply-thread consensus \u2014 they often diverge.",
- "- If dissent is present, surface the strongest dissenting quote verbatim, even if the majority view dominates.",
- "- Include the scrape date on every time-sensitive claim."
- ].join("\n")
- );
- }
- );
- }
- export {
- registerRedditSentimentPrompt
- };
- //# sourceMappingURL=reddit-sentiment.js.map
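
The `reddit-sentiment` prompt above goes the same way. Its one piece of reusable logic is the subreddit normalizer; if you rebuild the prompt client-side, this standalone sketch reproduces that exact transformation (`parseSubreddits` is a hypothetical name):

```typescript
// Same normalization the deleted prompt applied to its `subreddits`
// argument: split on commas, trim, strip a leading "r/" or "/r/",
// and drop empty entries.
function parseSubreddits(input?: string): string[] {
  if (!input) return [];
  return input
    .split(",")
    .map((value) => value.trim().replace(/^\/?r\//i, ""))
    .filter(Boolean);
}

const scoped = parseSubreddits("webdev, /r/javascript, ");
console.log(scoped); // ["webdev", "javascript"]
console.log(
  scoped.length
    ? `Scope Reddit searches to ${scoped.map((s) => `r/${s}`).join(", ")} when possible.`
    : "",
);
```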
package/dist/prompts/reddit-sentiment.js.map DELETED
@@ -1,7 +0,0 @@
- {
- "version": 3,
- "sources": ["../../../src/prompts/reddit-sentiment.ts"],
- "sourcesContent": ["import { text, type MCPServer } from 'mcp-use/server';\nimport { z } from 'zod';\n\nexport function registerRedditSentimentPrompt(server: MCPServer): void {\n server.prompt(\n {\n name: 'reddit-sentiment',\n title: 'Reddit Sentiment',\n description: 'Research Reddit sentiment for a topic using the research-powerpack tools \u2014 lived experience, migration stories, agreement/dissent distribution.',\n schema: z.object({\n topic: z.string().describe('Topic to evaluate. Phrase it as a sentiment question \u2014 \"what developers actually think about X\", \"why teams moved from X to Y\".'),\n subreddits: z.string().optional().describe('Optional comma-separated subreddit filters, e.g. \"webdev,javascript\".'),\n }),\n },\n async ({ topic, subreddits }) => {\n const subredditList = subreddits\n ? subreddits\n .split(',')\n .map((value) => value.trim().replace(/^\\/?r\\//i, ''))\n .filter(Boolean)\n : [];\n const subredditScope = subredditList.length\n ? ` Scope Reddit searches to ${subredditList.map((s) => `r/${s}`).join(', ')} when possible.`\n : '';\n\n return text(\n [\n 'You are a research agent using the research-powerpack MCP tools (3 tools: `start-research`, `web-search`, `scrape-links`) to characterize Reddit sentiment. You are running a research LOOP, not answering from memory. Sentiment claims must be traceable to specific Reddit threads you expanded via `scrape-links` \u2014 never cite a thread you have not scraped.',\n '',\n `Research goal: Reddit sentiment on \"${topic}\" \u2014 agreement distribution, dissent distribution, representative verbatim quotes with attribution, and the strongest causal explanations.${subredditScope}`,\n '',\n '## Workflow',\n '',\n '1. **Call `start-research` with `goal` = the research goal above.** The brief will classify this as `sentiment`, set `primary_branch` to `reddit` (or `both` if official sources also matter), and list 25\u201350 seed queries ready for `web-search`.',\n '2. **Fire two parallel `web-search` calls in one turn** \u2014 one with `scope: \"reddit\"` for post-permalink discovery, one with `scope: \"web\"` for supporting evidence (post-mortems, blog write-ups, GitHub issues). Set `extract` to describe the shape of the sentiment answer: \"agreement reasons | dissent reasons | representative quotes | migration drivers\".',\n '3. **Shortlist the strongest Reddit threads** \u2014 those with (a) high comment count, (b) visible disagreement in replies, (c) specific stack/environment details from the OP. Avoid single-comment threads.',\n '4. **Fetch with `scrape-links`** \u2014 batch 3\u201310 reddit.com post permalinks in one call. `scrape-links` auto-detects `reddit.com/r/.../comments/` URLs and routes them through the Reddit API (threaded post + full comment tree). Read every comment tree end-to-end, not just the top-voted reply.',\n '5. **Scrape supporting evidence** with another `scrape-links` call (in parallel, different call from the reddit batch) \u2014 blog post-mortems, GitHub issues, HN discussions referenced in the threads. Use `extract` = \"concrete reasons | stack details | version numbers | outcome\". The extractor preserves verbatim quotes and surfaces referenced-but-unscraped URLs under `## Follow-up signals`.',\n '6. **Loop**: if the classifier flags gaps (\"no dissent voices captured\", \"no migration timeline\") or brief `gaps_to_watch` are unmet, build new queries and run another pass. 
Stop after 4 passes or when sentiment distribution stabilizes across two passes.',\n '',\n '## Output discipline',\n '',\n '- Report sentiment as a distribution (\"~N of M replies agreed / ~K dissented / rest off-topic\"), not a single mood label.',\n '- Cite every quote with the Reddit thread permalink plus `u/username` attribution.',\n '- Separate OP claims from reply-thread consensus \u2014 they often diverge.',\n '- If dissent is present, surface the strongest dissenting quote verbatim, even if the majority view dominates.',\n '- Include the scrape date on every time-sensitive claim.',\n ].join('\\n'),\n );\n },\n );\n}\n"],
- "mappings": "AAAA,SAAS,YAA4B;AACrC,SAAS,SAAS;AAEX,SAAS,8BAA8B,QAAyB;AACrE,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,MACP,aAAa;AAAA,MACb,QAAQ,EAAE,OAAO;AAAA,QACf,OAAO,EAAE,OAAO,EAAE,SAAS,sIAAiI;AAAA,QAC5J,YAAY,EAAE,OAAO,EAAE,SAAS,EAAE,SAAS,uEAAuE;AAAA,MACpH,CAAC;AAAA,IACH;AAAA,IACA,OAAO,EAAE,OAAO,WAAW,MAAM;AAC/B,YAAM,gBAAgB,aAClB,WACG,MAAM,GAAG,EACT,IAAI,CAAC,UAAU,MAAM,KAAK,EAAE,QAAQ,YAAY,EAAE,CAAC,EACnD,OAAO,OAAO,IACjB,CAAC;AACL,YAAM,iBAAiB,cAAc,SACjC,6BAA6B,cAAc,IAAI,CAAC,MAAM,KAAK,CAAC,EAAE,EAAE,KAAK,IAAI,CAAC,oBAC1E;AAEJ,aAAO;AAAA,QACL;AAAA,UACE;AAAA,UACA;AAAA,UACA,uCAAuC,KAAK,iJAA4I,cAAc;AAAA,UACtM;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF,EAAE,KAAK,IAAI;AAAA,MACb;AAAA,IACF;AAAA,EACF;AACF;",
- "names": []
- }
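
Taken together, 6.0.13 → 6.0.15 is a pure removal release: no output schema on `start-research`, no MCP prompts. A quick post-upgrade smoke check, under the same endpoint assumption as the earlier sketch:

```typescript
// Verify what a 6.0.15 server actually advertises. Depending on how
// mcp-use treats an empty prompt registry, prompts/list may return an
// empty array or fail with "method not found"; either confirms the removal.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";

const client = new Client({ name: "upgrade-check", version: "0.0.0" });
await client.connect(
  new StreamableHTTPClientTransport(new URL("http://localhost:3000/mcp")), // assumed endpoint
);

const tools = await client.listTools();
console.log(tools.tools.map((t) => t.name)); // expect ["start-research", "web-search", "scrape-links"]

try {
  const prompts = await client.listPrompts();
  console.log(prompts.prompts.map((p) => p.name)); // expect []
} catch {
  console.log("prompts capability not exposed at all");
}
```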