research-powerpack-mcp 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/README.md +486 -0
  2. package/dist/clients/reddit.d.ts +61 -0
  3. package/dist/clients/reddit.d.ts.map +1 -0
  4. package/dist/clients/reddit.js +179 -0
  5. package/dist/clients/reddit.js.map +1 -0
  6. package/dist/clients/research.d.ts +41 -0
  7. package/dist/clients/research.d.ts.map +1 -0
  8. package/dist/clients/research.js +77 -0
  9. package/dist/clients/research.js.map +1 -0
  10. package/dist/clients/scraper.d.ts +44 -0
  11. package/dist/clients/scraper.d.ts.map +1 -0
  12. package/dist/clients/scraper.js +171 -0
  13. package/dist/clients/scraper.js.map +1 -0
  14. package/dist/clients/search.d.ts +46 -0
  15. package/dist/clients/search.d.ts.map +1 -0
  16. package/dist/clients/search.js +91 -0
  17. package/dist/clients/search.js.map +1 -0
  18. package/dist/config/index.d.ts +59 -0
  19. package/dist/config/index.d.ts.map +1 -0
  20. package/dist/config/index.js +100 -0
  21. package/dist/config/index.js.map +1 -0
  22. package/dist/index.d.ts +3 -0
  23. package/dist/index.d.ts.map +1 -0
  24. package/dist/index.js +152 -0
  25. package/dist/index.js.map +1 -0
  26. package/dist/schemas/deep-research.d.ts +100 -0
  27. package/dist/schemas/deep-research.d.ts.map +1 -0
  28. package/dist/schemas/deep-research.js +57 -0
  29. package/dist/schemas/deep-research.js.map +1 -0
  30. package/dist/schemas/scrape-links.d.ts +38 -0
  31. package/dist/schemas/scrape-links.d.ts.map +1 -0
  32. package/dist/schemas/scrape-links.js +26 -0
  33. package/dist/schemas/scrape-links.js.map +1 -0
  34. package/dist/schemas/web-search.d.ts +24 -0
  35. package/dist/schemas/web-search.d.ts.map +1 -0
  36. package/dist/schemas/web-search.js +12 -0
  37. package/dist/schemas/web-search.js.map +1 -0
  38. package/dist/services/file-attachment.d.ts +30 -0
  39. package/dist/services/file-attachment.d.ts.map +1 -0
  40. package/dist/services/file-attachment.js +196 -0
  41. package/dist/services/file-attachment.js.map +1 -0
  42. package/dist/services/llm-processor.d.ts +19 -0
  43. package/dist/services/llm-processor.d.ts.map +1 -0
  44. package/dist/services/llm-processor.js +44 -0
  45. package/dist/services/llm-processor.js.map +1 -0
  46. package/dist/services/markdown-cleaner.d.ts +8 -0
  47. package/dist/services/markdown-cleaner.d.ts.map +1 -0
  48. package/dist/services/markdown-cleaner.js +56 -0
  49. package/dist/services/markdown-cleaner.js.map +1 -0
  50. package/dist/tools/definitions.d.ts +66 -0
  51. package/dist/tools/definitions.d.ts.map +1 -0
  52. package/dist/tools/definitions.js +125 -0
  53. package/dist/tools/definitions.js.map +1 -0
  54. package/dist/tools/reddit.d.ts +10 -0
  55. package/dist/tools/reddit.d.ts.map +1 -0
  56. package/dist/tools/reddit.js +105 -0
  57. package/dist/tools/reddit.js.map +1 -0
  58. package/dist/tools/research.d.ts +14 -0
  59. package/dist/tools/research.d.ts.map +1 -0
  60. package/dist/tools/research.js +126 -0
  61. package/dist/tools/research.js.map +1 -0
  62. package/dist/tools/scrape.d.ts +14 -0
  63. package/dist/tools/scrape.d.ts.map +1 -0
  64. package/dist/tools/scrape.js +111 -0
  65. package/dist/tools/scrape.js.map +1 -0
  66. package/dist/tools/search.d.ts +14 -0
  67. package/dist/tools/search.d.ts.map +1 -0
  68. package/dist/tools/search.js +121 -0
  69. package/dist/tools/search.js.map +1 -0
  70. package/dist/utils/errors.d.ts +8 -0
  71. package/dist/utils/errors.d.ts.map +1 -0
  72. package/dist/utils/errors.js +30 -0
  73. package/dist/utils/errors.js.map +1 -0
  74. package/dist/utils/markdown-formatter.d.ts +5 -0
  75. package/dist/utils/markdown-formatter.d.ts.map +1 -0
  76. package/dist/utils/markdown-formatter.js +15 -0
  77. package/dist/utils/markdown-formatter.js.map +1 -0
  78. package/dist/utils/url-aggregator.d.ts +55 -0
  79. package/dist/utils/url-aggregator.d.ts.map +1 -0
  80. package/dist/utils/url-aggregator.js +246 -0
  81. package/dist/utils/url-aggregator.js.map +1 -0
  82. package/package.json +56 -0
package/README.md ADDED
@@ -0,0 +1,486 @@
# Research Powerpack MCP

**The ultimate research MCP toolkit** — Reddit mining, web search with CTR aggregation, AI-powered deep research, and intelligent web scraping, all in one modular package.

[![npm version](https://img.shields.io/npm/v/research-powerpack-mcp.svg)](https://www.npmjs.com/package/research-powerpack-mcp)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

---

## Why Research Powerpack?

AI coding assistants are only as good as the context they have. This MCP server gives your AI **superpowers for research**:

| Tool | What It Does | Real Value |
|------|-------------|------------|
| `web_search` | Batch Google search (up to 100 keywords) with CTR-weighted ranking | Find the most authoritative sources across multiple search angles simultaneously |
| `search_reddit` | Google-powered Reddit search with advanced operators | Discover real user discussions, opinions, and experiences |
| `get_reddit_post` | Fetch Reddit posts with smart comment allocation | Extract community wisdom with automatic comment budget distribution |
| `scrape_links` | Universal URL scraping with automatic fallback | Get full content from any webpage with JS rendering and geo-targeting |
| `deep_research` | AI-powered batch research with citations | Get comprehensive, evidence-based answers to multiple questions in parallel |

**Modular by design** — use just one tool or all five. Configure only the API keys you need.

---

## Quick Start

### 1. Install

```bash
npm install research-powerpack-mcp
```

### 2. Configure (pick what you need)

Copy `.env.example` to `.env` and add the API keys for the tools you want:

```bash
# Minimal (just web search) - FREE
SERPER_API_KEY=your_serper_key

# Full power (all 5 tools)
SERPER_API_KEY=your_serper_key
REDDIT_CLIENT_ID=your_reddit_id
REDDIT_CLIENT_SECRET=your_reddit_secret
SCRAPEDO_API_KEY=your_scrapedo_key
OPENROUTER_API_KEY=your_openrouter_key
```

### 3. Add to your MCP client

**Claude Desktop** (`claude_desktop_config.json`):
```json
{
  "mcpServers": {
    "research-powerpack": {
      "command": "npx",
      "args": ["research-powerpack-mcp"],
      "env": {
        "SERPER_API_KEY": "your_key",
        "REDDIT_CLIENT_ID": "your_id",
        "REDDIT_CLIENT_SECRET": "your_secret",
        "SCRAPEDO_API_KEY": "your_key",
        "OPENROUTER_API_KEY": "your_key"
      }
    }
  }
}
```

**Cursor/Windsurf** (`.cursor/mcp.json` or similar):
```json
{
  "mcpServers": {
    "research-powerpack": {
      "command": "npx",
      "args": ["research-powerpack-mcp"],
      "env": {
        "SERPER_API_KEY": "your_key"
      }
    }
  }
}
```

---

## Environment Variables & Tool Availability

Research Powerpack uses a **modular architecture**. Tools are automatically enabled based on which API keys you provide:

| ENV Variable | Tools Enabled | Free Tier |
|--------------|---------------|-----------|
| `SERPER_API_KEY` | `web_search`, `search_reddit` | 2,500 queries |
| `REDDIT_CLIENT_ID` + `REDDIT_CLIENT_SECRET` | `get_reddit_post` | Unlimited |
| `SCRAPEDO_API_KEY` | `scrape_links` | 1,000 credits |
| `OPENROUTER_API_KEY` | `deep_research` + AI extraction in `scrape_links` | Pay-as-you-go |

**No ENV = No crash.** The server always starts. If you call a tool without the required API key, you get a helpful error message with setup instructions.
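
In practice, availability is derived from the environment at call time rather than at startup. A minimal sketch of the pattern (the `requireEnv` helper and the error wording are illustrative, not this package's actual internals):

```typescript
// Hypothetical sketch: every tool stays registered, but each handler
// checks its required env vars when invoked instead of at startup.
type ToolHandler = (args: unknown) => Promise<string>;

function requireEnv(vars: string[], handler: ToolHandler): ToolHandler {
  return async (args) => {
    const missing = vars.filter((v) => !process.env[v]);
    if (missing.length > 0) {
      // The server keeps running; the caller gets setup guidance, not a crash.
      return `Missing ${missing.join(", ")}. Add them to your .env to enable this tool.`;
    }
    return handler(args);
  };
}

// Example: deep_research only works once OPENROUTER_API_KEY is present.
const deepResearch = requireEnv(["OPENROUTER_API_KEY"], async () => "…");
```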

### Configuration Examples

```bash
# Search-only mode (just web_search and search_reddit)
SERPER_API_KEY=xxx

# Reddit research mode (search + fetch posts)
SERPER_API_KEY=xxx
REDDIT_CLIENT_ID=xxx
REDDIT_CLIENT_SECRET=xxx

# Full research mode (all tools)
SERPER_API_KEY=xxx
REDDIT_CLIENT_ID=xxx
REDDIT_CLIENT_SECRET=xxx
SCRAPEDO_API_KEY=xxx
OPENROUTER_API_KEY=xxx
```

---

## API Key Setup Guides

<details>
<summary><b>🔍 Serper API (Google Search)</b></summary>

### What you get
- 2,500 free queries/month
- Fast Google search results via API
- Enables `web_search` and `search_reddit` tools

### Setup Steps
1. Go to [serper.dev](https://serper.dev)
2. Click **"Get API Key"** (top right)
3. Sign up with email or Google
4. Your API key is displayed on the dashboard
5. Copy it to your `.env`:
   ```
   SERPER_API_KEY=your_key_here
   ```

### Pricing
- **Free**: 2,500 queries/month
- **Paid**: $50/month for 50,000 queries ($0.001/query)
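
### Quick Key Check

A new key can be smoke-tested with a single request against Serper's search endpoint (Node 18+ with built-in `fetch`; the `google.serper.dev/search` URL and `X-API-KEY` header follow Serper's public docs, the rest is just a sketch):

```typescript
// One-off smoke test for a Serper key.
const res = await fetch("https://google.serper.dev/search", {
  method: "POST",
  headers: {
    "X-API-KEY": process.env.SERPER_API_KEY ?? "",
    "Content-Type": "application/json",
  },
  body: JSON.stringify({ q: "model context protocol" }),
});
const data = await res.json();
// A working key returns HTTP 200 and a list of organic results.
console.log(res.status, data.organic?.length, "organic results");
```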

</details>

<details>
<summary><b>🤖 Reddit OAuth (Reddit API)</b></summary>

### What you get
- Unlimited Reddit API access
- Fetch posts and comments with upvote sorting
- Enables `get_reddit_post` tool

### Setup Steps
1. Go to [reddit.com/prefs/apps](https://www.reddit.com/prefs/apps)
2. Scroll down and click **"create another app..."**
3. Fill in:
   - **Name**: `research-powerpack` (or any name)
   - **App type**: Select **"script"** (important!)
   - **Description**: Optional
   - **About URL**: Leave blank
   - **Redirect URI**: `http://localhost:8080` (required but not used)
4. Click **"create app"**
5. Copy your credentials:
   - **Client ID**: The string under your app name (e.g., `yuq_M0kWusHp2olglFBnpw`)
   - **Client Secret**: The "secret" field
6. Add to your `.env`:
   ```
   REDDIT_CLIENT_ID=your_client_id
   REDDIT_CLIENT_SECRET=your_client_secret
   ```

### Tips
- Script apps have the highest rate limits
- No user authentication required
- Works immediately after creation
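
### Quick Credential Check

To confirm the pair works, here is a minimal sketch of Reddit's standard application-only OAuth flow (client-credentials grant over HTTP Basic auth). It is written from Reddit's public OAuth docs and mirrors what the bundled client presumably does internally; it is not this package's own code:

```typescript
// App-only OAuth check (Node 18+). Reddit rejects requests without a User-Agent.
const basic = Buffer.from(
  `${process.env.REDDIT_CLIENT_ID}:${process.env.REDDIT_CLIENT_SECRET}`,
).toString("base64");

const res = await fetch("https://www.reddit.com/api/v1/access_token", {
  method: "POST",
  headers: {
    Authorization: `Basic ${basic}`,
    "Content-Type": "application/x-www-form-urlencoded",
    "User-Agent": "research-powerpack-test/1.0",
  },
  body: "grant_type=client_credentials",
});
const { access_token, expires_in } = await res.json();
console.log("token received:", Boolean(access_token), "expires in", expires_in, "s");
```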

</details>

<details>
<summary><b>🌐 Scrape.do (Web Scraping)</b></summary>

### What you get
- 1,000 free scraping credits
- JavaScript rendering support
- Geo-targeting and CAPTCHA handling
- Enables `scrape_links` tool

### Setup Steps
1. Go to [scrape.do](https://scrape.do)
2. Click **"Start Free"** or **"Get Started"**
3. Sign up with email
4. Your API key is on the dashboard
5. Add to your `.env`:
   ```
   SCRAPEDO_API_KEY=your_key_here
   ```

### Credit Usage
- **Basic scrape**: 1 credit
- **JavaScript rendering**: 5 credits
- **Geo-targeting**: +25 credits

### Pricing
- **Free**: 1,000 credits (renews monthly)
- **Starter**: $29/month for 100,000 credits

</details>

<details>
<summary><b>🧠 OpenRouter (AI Models)</b></summary>

### What you get
- Access to 100+ AI models via one API
- Enables `deep_research` tool
- Enables AI extraction in `scrape_links` (`use_llm`, `what_to_extract`)

### Setup Steps
1. Go to [openrouter.ai](https://openrouter.ai)
2. Click **"Sign In"** → Sign up with Google/GitHub/email
3. Go to [openrouter.ai/keys](https://openrouter.ai/keys)
4. Click **"Create Key"**
5. Copy the key (starts with `sk-or-...`)
6. Add to your `.env`:
   ```
   OPENROUTER_API_KEY=sk-or-v1-xxxxx
   ```

### Recommended Models
The default model is `perplexity/sonar-deep-research` (optimized for research with web search).

Alternative models:
```bash
# Fast and capable
RESEARCH_MODEL=x-ai/grok-4.1-fast

# High quality
RESEARCH_MODEL=anthropic/claude-3.5-sonnet

# Budget-friendly
RESEARCH_MODEL=openai/gpt-4o-mini
```

### Pricing
- Pay-as-you-go (no subscription required)
- Prices vary by model (~$0.001-$0.03 per 1K tokens)
- `perplexity/sonar-deep-research`: ~$5 per 1M tokens
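
### Quick Key Check

OpenRouter's chat completions endpoint is OpenAI-compatible, so a key can be exercised with one HTTP call (endpoint and Bearer auth per OpenRouter's docs; the model string is just the default named above):

```typescript
// One-shot completion against OpenRouter's OpenAI-compatible API.
const res = await fetch("https://openrouter.ai/api/v1/chat/completions", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: process.env.RESEARCH_MODEL ?? "perplexity/sonar-deep-research",
    messages: [{ role: "user", content: "One-line summary of MCP?" }],
  }),
});
const data = await res.json();
console.log(data.choices?.[0]?.message?.content);
```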

</details>

---

## Tool Reference

### `web_search`

**Batch web search** using Google via Serper API. Search up to 100 keywords in parallel.

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `keywords` | `string[]` | Yes | Search queries (1-100). Use distinct keywords for maximum coverage. |

**Features:**
- Google search operators: `site:`, `-exclusion`, `"exact phrase"`, `filetype:`
- CTR-weighted ranking identifies high-consensus URLs (see the sketch after this list)
- Related search suggestions per query
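
The package does not publish its exact weighting, but the idea behind CTR weighting is simple: each URL earns the typical click-through rate for the rank it holds under every keyword, and the sums are compared, so a URL that ranks high under many distinct queries beats one that ranks high under only one. A minimal sketch under that assumption (`CTR_BY_RANK` and `rankUrls` are hypothetical names, and the weights are illustrative, not the package's actual values):

```typescript
// Hypothetical CTR-weighted aggregation across per-keyword result lists.
// Position-1 results carry the largest weight; deep results barely count.
const CTR_BY_RANK = [0.28, 0.15, 0.11, 0.08, 0.07, 0.05, 0.04, 0.03, 0.03, 0.02];

function rankUrls(resultsPerKeyword: string[][]): [string, number][] {
  const scores = new Map<string, number>();
  for (const results of resultsPerKeyword) {
    results.forEach((url, rank) => {
      const weight = CTR_BY_RANK[rank] ?? 0.01;
      scores.set(url, (scores.get(url) ?? 0) + weight);
    });
  }
  // Highest aggregate score first = strongest cross-query consensus.
  return [...scores.entries()].sort((a, b) => b[1] - a[1]);
}
```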

**Example:**
```json
{
  "keywords": [
    "best IDE 2025",
    "VS Code alternatives",
    "Cursor vs Windsurf comparison"
  ]
}
```

---

### `search_reddit`

**Search Reddit** via Google with automatic `site:reddit.com` filtering.

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `queries` | `string[]` | Yes | Search queries (max 10). Use distinct queries for multiple perspectives. |
| `date_after` | `string` | No | Filter results after date (YYYY-MM-DD) |

**Search Operators:**
- `intitle:keyword` — Match in post title
- `"exact phrase"` — Exact match
- `OR` — Match either term
- `-exclude` — Exclude term

**Example:**
```json
{
  "queries": [
    "best mechanical keyboard 2025",
    "intitle:keyboard recommendation",
    "\"keychron\" OR \"nuphy\" review"
  ],
  "date_after": "2024-01-01"
}
```

---

### `get_reddit_post`

**Fetch Reddit posts** with smart comment allocation (1,000 comment budget distributed automatically).

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `urls` | `string[]` | Yes | — | Reddit post URLs (2-50) |
| `fetch_comments` | `boolean` | No | `true` | Whether to fetch comments |
| `max_comments` | `number` | No | auto | Override comment allocation |

**Smart Allocation** (sketched after this list):
- 2 posts: ~500 comments/post (deep dive)
- 10 posts: ~100 comments/post
- 50 posts: ~20 comments/post (quick scan)
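
The package's typings declare `calculateCommentAllocation(postCount: number): CommentAllocation` (see the bundled `reddit.d.ts` below), and the numbers above are consistent with an even split of the 1,000-comment budget. A sketch under that assumption; only the interface shape comes from the typings, while the per-post cap and redistribution logic are guesswork:

```typescript
// Sketch matching the package's declared CommentAllocation interface.
// The shape is from the typings; the logic is inferred from the README.
interface CommentAllocation {
  totalBudget: number;
  perPostBase: number;
  perPostCapped: number;
  redistributed: boolean;
}

function calculateCommentAllocation(postCount: number): CommentAllocation {
  const totalBudget = 1000;                         // fixed comment budget
  const perPostBase = Math.floor(totalBudget / postCount);
  const perPostCapped = Math.min(perPostBase, 500); // assumed per-post ceiling
  return {
    totalBudget,
    perPostBase,
    perPostCapped,
    redistributed: perPostCapped < perPostBase,     // leftover spread elsewhere
  };
}
```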

**Example:**
```json
{
  "urls": [
    "https://reddit.com/r/programming/comments/abc123/post_title",
    "https://reddit.com/r/webdev/comments/def456/another_post"
  ],
  "fetch_comments": true
}
```

---

### `scrape_links`

**Universal URL content extraction** with automatic fallback modes.

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `urls` | `string[]` | Yes | — | URLs to scrape (3-50) |
| `timeout` | `number` | No | `30` | Timeout per URL (seconds) |
| `use_llm` | `boolean` | No | `false` | Enable AI extraction (requires `OPENROUTER_API_KEY`) |
| `what_to_extract` | `string` | No | — | Extraction instructions for AI |

**Automatic Fallback** (sketched after this list):
1. Basic mode (fast)
2. JavaScript rendering (for SPAs)
3. JavaScript + US geo-targeting (for restricted content)
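
Conceptually this is an escalation loop: try the cheapest mode first and pay for JS rendering or geo-targeting only when the cheaper attempt comes back empty or blocked. A minimal sketch, assuming a hypothetical `fetchVia` transport (the mode names here are illustrative, not the package's internals):

```typescript
// Hypothetical escalation loop over the three scrape modes, cheapest first.
type Mode = "basic" | "js" | "js+geo";
const MODES: Mode[] = ["basic", "js", "js+geo"];

async function scrapeWithFallback(
  url: string,
  fetchVia: (url: string, mode: Mode) => Promise<string | null>,
): Promise<string> {
  for (const mode of MODES) {
    const content = await fetchVia(url, mode); // null = blocked or empty result
    if (content) return content;               // stop escalating on first success
  }
  throw new Error(`All scrape modes failed for ${url}`);
}
```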

**Token Allocation:** 32,000 tokens distributed across URLs:
- 3 URLs: ~10,666 tokens/URL
- 10 URLs: ~3,200 tokens/URL
- 50 URLs: ~640 tokens/URL

**Example:**
```json
{
  "urls": [
    "https://example.com/article1",
    "https://example.com/article2",
    "https://example.com/article3"
  ],
  "use_llm": true,
  "what_to_extract": "Extract the main arguments, key statistics, and conclusions"
}
```

---

### `deep_research`

**AI-powered batch research** with web search and citations.

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `questions` | `object[]` | Yes | Research questions (2-10) |
| `questions[].question` | `string` | Yes | The research question |
| `questions[].file_attachments` | `object[]` | No | Files to include as context |

**Token Allocation:** 32,000 tokens distributed across questions:
- 2 questions: 16,000 tokens/question (deep dive)
- 5 questions: 6,400 tokens/question (balanced)
- 10 questions: 3,200 tokens/question (rapid multi-topic)

**Example:**
```json
{
  "questions": [
    {
      "question": "What are the current best practices for React Server Components in 2025? Include patterns for data fetching and caching."
    },
    {
      "question": "Compare the performance characteristics of Bun vs Node.js for production workloads. Include benchmarks and real-world case studies."
    }
  ]
}
```

---

## Recommended Workflows

### Research a Technology Decision

```
1. web_search: ["React vs Vue 2025", "Next.js vs Nuxt comparison", "frontend framework benchmarks"]
2. search_reddit: ["best frontend framework 2025", "migrating from React to Vue", "Next.js production experience"]
3. get_reddit_post: [URLs from step 2]
4. scrape_links: [Documentation and blog URLs from step 1]
5. deep_research: [Synthesize findings into specific questions]
```

### Competitive Analysis

```
1. web_search: ["competitor name review", "competitor vs alternatives", "competitor pricing"]
2. scrape_links: [Competitor websites, review sites, comparison pages]
3. search_reddit: ["competitor name experience", "switching from competitor"]
4. get_reddit_post: [URLs from step 3]
```

### Debug an Obscure Error

```
1. web_search: ["exact error message", "error message + framework name"]
2. search_reddit: ["error message", "framework + error type"]
3. get_reddit_post: [URLs with solutions]
4. scrape_links: [Stack Overflow answers, GitHub issues]
```

---

## Enable Full Power Mode

For the best research experience, configure keys for all four providers:

```bash
# .env
SERPER_API_KEY=your_serper_key          # Free: 2,500 queries/month
REDDIT_CLIENT_ID=your_reddit_id         # Free: Unlimited
REDDIT_CLIENT_SECRET=your_reddit_secret
SCRAPEDO_API_KEY=your_scrapedo_key      # Free: 1,000 credits/month
OPENROUTER_API_KEY=your_openrouter_key  # Pay-as-you-go
```

This unlocks:
- **5 research tools** working together
- **AI-powered content extraction** in `scrape_links`
- **Deep research with web search** and citations
- **Complete Reddit mining** (search → fetch → analyze)

Total setup time: ~10 minutes. Total free tier value: ~$50/month equivalent.

---

## Development

```bash
# Clone
git clone https://github.com/yigitkonur/research-powerpack-mcp.git
cd research-powerpack-mcp

# Install
npm install

# Development
npm run dev

# Build
npm run build

# Type check
npm run typecheck
```

---

## License

MIT © [Yiğit Konur](https://github.com/yigitkonur)
package/dist/clients/reddit.d.ts ADDED
@@ -0,0 +1,61 @@
/**
 * Reddit OAuth API Client
 * Fetches posts and comments sorted by score (most upvoted first)
 */
export interface Post {
    title: string;
    author: string;
    subreddit: string;
    body: string;
    score: number;
    commentCount: number;
    url: string;
    created: Date;
    flair?: string;
    isNsfw: boolean;
    isPinned: boolean;
}
export interface Comment {
    author: string;
    body: string;
    score: number;
    depth: number;
    isOP: boolean;
}
export interface PostResult {
    post: Post;
    comments: Comment[];
    allocatedComments: number;
    actualComments: number;
}
export interface BatchPostResult {
    results: Map<string, PostResult | Error>;
    batchesProcessed: number;
    totalPosts: number;
    rateLimitHits: number;
    commentAllocation: CommentAllocation;
}
export interface CommentAllocation {
    totalBudget: number;
    perPostBase: number;
    perPostCapped: number;
    redistributed: boolean;
}
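/**
 * Splits the fixed 1,000-comment budget across the requested posts.
 * Per the README, perPostBase is roughly totalBudget / postCount, and
 * redistributed flags when a per-post cap freed budget for other posts
 * (inferred from the docs, not verified against the implementation).
 */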
export declare function calculateCommentAllocation(postCount: number): CommentAllocation;
export declare class RedditClient {
    private clientId;
    private clientSecret;
    private token;
    private tokenExpiry;
    private userAgent;
    constructor(clientId: string, clientSecret: string);
    private auth;
    private parseUrl;
    getPost(url: string, maxComments?: number): Promise<PostResult>;
    private formatBody;
    private extractComments;
    getPosts(urls: string[], maxComments?: number): Promise<Map<string, PostResult | Error>>;
    batchGetPosts(urls: string[], maxCommentsOverride?: number, fetchComments?: boolean, onBatchComplete?: (batchNum: number, totalBatches: number, processed: number) => void): Promise<BatchPostResult>;
    private delay;
}
//# sourceMappingURL=reddit.d.ts.map
package/dist/clients/reddit.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"reddit.d.ts","sourceRoot":"","sources":["../../src/clients/reddit.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAIH,MAAM,WAAW,IAAI;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC;IACd,YAAY,EAAE,MAAM,CAAC;IACrB,GAAG,EAAE,MAAM,CAAC;IACZ,OAAO,EAAE,IAAI,CAAC;IACd,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,MAAM,EAAE,OAAO,CAAC;IAChB,QAAQ,EAAE,OAAO,CAAC;CACnB;AAED,MAAM,WAAW,OAAO;IACtB,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,OAAO,CAAC;CACf;AAED,MAAM,WAAW,UAAU;IACzB,IAAI,EAAE,IAAI,CAAC;IACX,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,iBAAiB,EAAE,MAAM,CAAC;IAC1B,cAAc,EAAE,MAAM,CAAC;CACxB;AAED,MAAM,WAAW,eAAe;IAC9B,OAAO,EAAE,GAAG,CAAC,MAAM,EAAE,UAAU,GAAG,KAAK,CAAC,CAAC;IACzC,gBAAgB,EAAE,MAAM,CAAC;IACzB,UAAU,EAAE,MAAM,CAAC;IACnB,aAAa,EAAE,MAAM,CAAC;IACtB,iBAAiB,EAAE,iBAAiB,CAAC;CACtC;AAED,MAAM,WAAW,iBAAiB;IAChC,WAAW,EAAE,MAAM,CAAC;IACpB,WAAW,EAAE,MAAM,CAAC;IACpB,aAAa,EAAE,MAAM,CAAC;IACtB,aAAa,EAAE,OAAO,CAAC;CACxB;AAED,wBAAgB,0BAA0B,CAAC,SAAS,EAAE,MAAM,GAAG,iBAAiB,CAK/E;AAED,qBAAa,YAAY;IAKX,OAAO,CAAC,QAAQ;IAAU,OAAO,CAAC,YAAY;IAJ1D,OAAO,CAAC,KAAK,CAAuB;IACpC,OAAO,CAAC,WAAW,CAAK;IACxB,OAAO,CAAC,SAAS,CAAmE;gBAEhE,QAAQ,EAAE,MAAM,EAAU,YAAY,EAAE,MAAM;YAEpD,IAAI;IA0BlB,OAAO,CAAC,QAAQ;IAKV,OAAO,CAAC,GAAG,EAAE,MAAM,EAAE,WAAW,SAAM,GAAG,OAAO,CAAC,UAAU,CAAC;IA8DlE,OAAO,CAAC,UAAU;IAOlB,OAAO,CAAC,eAAe;IA4BjB,QAAQ,CAAC,IAAI,EAAE,MAAM,EAAE,EAAE,WAAW,SAAM,GAAG,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,UAAU,GAAG,KAAK,CAAC,CAAC;IAUrF,aAAa,CACjB,IAAI,EAAE,MAAM,EAAE,EACd,mBAAmB,CAAC,EAAE,MAAM,EAC5B,aAAa,UAAO,EACpB,eAAe,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,KAAK,IAAI,GACpF,OAAO,CAAC,eAAe,CAAC;IA0C3B,OAAO,CAAC,KAAK;CAGd"}