scrapex 0.5.2 → 1.0.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/README.md +392 -145
- package/dist/enhancer-Q6CSc1gA.mjs +220 -0
- package/dist/enhancer-Q6CSc1gA.mjs.map +1 -0
- package/dist/enhancer-oM4BhYYS.cjs +268 -0
- package/dist/enhancer-oM4BhYYS.cjs.map +1 -0
- package/dist/index.cjs +852 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +264 -0
- package/dist/index.d.cts.map +1 -0
- package/dist/index.d.mts +264 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +798 -0
- package/dist/index.mjs.map +1 -0
- package/dist/llm/index.cjs +316 -0
- package/dist/llm/index.cjs.map +1 -0
- package/dist/llm/index.d.cts +211 -0
- package/dist/llm/index.d.cts.map +1 -0
- package/dist/llm/index.d.mts +211 -0
- package/dist/llm/index.d.mts.map +1 -0
- package/dist/llm/index.mjs +310 -0
- package/dist/llm/index.mjs.map +1 -0
- package/dist/parsers/index.cjs +200 -0
- package/dist/parsers/index.cjs.map +1 -0
- package/dist/parsers/index.d.cts +133 -0
- package/dist/parsers/index.d.cts.map +1 -0
- package/dist/parsers/index.d.mts +133 -0
- package/dist/parsers/index.d.mts.map +1 -0
- package/dist/parsers/index.mjs +192 -0
- package/dist/parsers/index.mjs.map +1 -0
- package/dist/types-CNQZVW36.d.mts +150 -0
- package/dist/types-CNQZVW36.d.mts.map +1 -0
- package/dist/types-D0HYR95H.d.cts +150 -0
- package/dist/types-D0HYR95H.d.cts.map +1 -0
- package/package.json +80 -100
- package/dist/index.d.ts +0 -45
- package/dist/index.js +0 -8
- package/dist/scrapex.cjs.development.js +0 -1128
- package/dist/scrapex.cjs.development.js.map +0 -1
- package/dist/scrapex.cjs.production.min.js +0 -2
- package/dist/scrapex.cjs.production.min.js.map +0 -1
- package/dist/scrapex.esm.js +0 -1120
- package/dist/scrapex.esm.js.map +0 -1
package/LICENSE
CHANGED
package/README.md
CHANGED
@@ -1,157 +1,404 @@

Removed: the old README's documentation of extraction result fields, superseded by the `ScrapedData` interface below:

- `date` - The document's publication date
- `copyright` - The document's copyright line, if present
- `author` - The document's author
- `publisher` - The document's publisher (website name)
- `text` - The main text of the document with all the junk thrown away
- `image` - The main image for the document (what's used by Facebook, etc.)
- `video` - A video URL that best represents the article
- `embeds` - An array of the iframe, embed, object, and video elements embedded in the article
- `tags` - Any tags or keywords found by checking href URLs against the patterns `a[href*='/t/']`, `a[href*='/tag/']`, `a[href*='/tags/']`, `a[href*='/topic/']`, `a[href*='/tagged/']`, and `a[href*='?keyword=']`
- `keywords` - Any keywords found by checking `<rel>` tags or href URLs
- `lang` - The language of the document, either detected or supplied by you
- `description` - The description of the document, from `<meta>` tags
- `favicon` - The URL of the document's [favicon](http://en.wikipedia.org/wiki/Favicon)
- `links` - An array of links embedded within the main article text (text and href for each)
- `logo` - An image URL that best represents the publisher brand, e.g. <https://entrepreneur.com/favicon180x180.png>
- `content` - Readability-view HTML of the article
- `html` - Full HTML of the page
- `text` - Clean text of the readable HTML
- `code` - Code segments defined using `pre > code` tags

Added: the rewritten README in full:
# scrapex

Modern web scraper with LLM-enhanced extraction, an extensible pipeline, and pluggable parsers.

> **Alpha Release**: v1.0.0 is currently in alpha. The API may change before the stable release.

## Features

- **LLM-Ready Output** - Content extracted as Markdown, optimized for AI/LLM consumption
- **Provider-Agnostic LLM** - Works with OpenAI, Anthropic, Ollama, LM Studio, or any OpenAI-compatible API
- **Extensible Pipeline** - Pluggable extractors with priority-based execution
- **Smart Extraction** - Uses Mozilla Readability for content, Cheerio for metadata
- **Markdown Parsing** - Parse markdown content, awesome lists, and GitHub repos
- **TypeScript First** - Full type safety with comprehensive type exports
- **Dual Format** - ESM and CommonJS builds

## Installation

```bash
npm install scrapex@alpha
```

### Optional Peer Dependencies

```bash
# For LLM features
npm install openai            # OpenAI/Ollama/LM Studio
npm install @anthropic-ai/sdk # Anthropic Claude

# For JavaScript-rendered pages
npm install puppeteer
```

## Quick Start

```typescript
import { scrape } from 'scrapex';

const result = await scrape('https://example.com/article');

console.log(result.title);       // "Article Title"
console.log(result.content);     // Markdown content
console.log(result.textContent); // Plain text (fewer tokens)
console.log(result.excerpt);     // First ~300 chars
```

## API Reference

### `scrape(url, options?)`

Fetch and extract metadata and content from a URL.

```typescript
import { scrape } from 'scrapex';

const result = await scrape('https://example.com', {
  timeout: 10000,
  userAgent: 'MyBot/1.0',
  extractContent: true,
  maxContentLength: 50000,
  respectRobots: false,
});
```

### `scrapeHtml(html, url, options?)`

Extract from raw HTML without fetching.

```typescript
import { scrapeHtml } from 'scrapex';

const html = await fetchSomehow('https://example.com');
const result = await scrapeHtml(html, 'https://example.com');
```
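`fetchSomehow` is a stand-in for any HTML source. With Node 20+ the built-in `fetch` is enough; a minimal sketch:

```typescript
import { scrapeHtml } from 'scrapex';

// Fetch the page ourselves, then hand the raw HTML to scrapex.
const response = await fetch('https://example.com');
const html = await response.text();

const result = await scrapeHtml(html, 'https://example.com');
console.log(result.title);
```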
### Result Object (`ScrapedData`)

```typescript
interface ScrapedData {
  // Identity
  url: string;
  canonicalUrl: string;
  domain: string;

  // Basic metadata
  title: string;
  description: string;
  image?: string;
  favicon?: string;

  // Content (LLM-optimized)
  content: string;     // Markdown format
  textContent: string; // Plain text
  excerpt: string;     // ~300 char preview
  wordCount: number;

  // Context
  author?: string;
  publishedAt?: string;
  modifiedAt?: string;
  siteName?: string;
  language?: string;

  // Classification
  contentType: 'article' | 'repo' | 'docs' | 'package' | 'video' | 'tool' | 'product' | 'unknown';
  keywords: string[];

  // Structured data
  jsonLd?: Record<string, unknown>[];
  links?: ExtractedLink[];

  // LLM Enhancements (when enabled)
  summary?: string;
  suggestedTags?: string[];
  entities?: ExtractedEntities;
  extracted?: Record<string, unknown>;

  // Meta
  scrapedAt: string;
  scrapeTimeMs: number;
  error?: string;
}
```
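For example, a consumer might use `wordCount` to decide between the Markdown and plain-text views when building prompts. A sketch, assuming `ScrapedData` is among the exported types; `toPromptText` is a hypothetical helper, not part of the library:

```typescript
import { scrape, type ScrapedData } from 'scrapex';

// Illustrative helper (not part of scrapex): pick the cheapest useful
// representation of a page for an LLM prompt.
function toPromptText(data: ScrapedData, maxWords = 2000): string {
  // Short pages fit whole as Markdown; long ones fall back to plain text.
  if (data.wordCount <= maxWords) return data.content;
  return `${data.excerpt}\n\n${data.textContent}`;
}

const result = await scrape('https://example.com/article');
console.log(toPromptText(result).slice(0, 200));
```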
## LLM Integration

### Using OpenAI

```typescript
import { scrape } from 'scrapex';
import { createOpenAI } from 'scrapex/llm';

const llm = createOpenAI({ apiKey: 'sk-...' });

const result = await scrape('https://example.com/article', {
  llm,
  enhance: ['summarize', 'tags', 'entities', 'classify'],
});

console.log(result.summary);       // AI-generated summary
console.log(result.suggestedTags); // ['javascript', 'web', ...]
console.log(result.entities);      // { people: [], organizations: [], ... }
```

### Using Anthropic Claude

```typescript
import { AnthropicProvider } from 'scrapex/llm';

const llm = new AnthropicProvider({
  apiKey: process.env.ANTHROPIC_API_KEY,
  model: 'claude-3-5-haiku-20241022', // or 'claude-sonnet-4-20250514'
});

const result = await scrape(url, { llm, enhance: ['summarize'] });
```

### Using Ollama (Local)

```typescript
import { createOllama } from 'scrapex/llm';

const llm = createOllama({ model: 'llama3.2' });

const result = await scrape(url, { llm, enhance: ['summarize'] });
```

### Using LM Studio (Local)

```typescript
import { createLMStudio } from 'scrapex/llm';

const llm = createLMStudio({ model: 'local-model' });

const result = await scrape(url, { llm, enhance: ['summarize'] });
```
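All four providers plug into the same `llm` option, so the choice can be deferred to runtime. A sketch, assuming the `LLMProvider` type is exported from `scrapex/llm` alongside the factories; `llmFromEnv` is a hypothetical helper:

```typescript
import { scrape } from 'scrapex';
import { createOpenAI, createOllama, AnthropicProvider, type LLMProvider } from 'scrapex/llm';

// Hypothetical helper: choose a provider from the environment,
// falling back to a local Ollama model.
function llmFromEnv(): LLMProvider {
  if (process.env.OPENAI_API_KEY) {
    return createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
  }
  if (process.env.ANTHROPIC_API_KEY) {
    return new AnthropicProvider({ apiKey: process.env.ANTHROPIC_API_KEY });
  }
  return createOllama({ model: 'llama3.2' });
}

const result = await scrape('https://example.com/article', {
  llm: llmFromEnv(),
  enhance: ['summarize'],
});
```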
### Structured Extraction

Extract specific data using a schema:

```typescript
const result = await scrape('https://example.com/product', {
  llm,
  extract: {
    productName: 'string',
    price: 'number',
    features: 'string[]',
    inStock: 'boolean',
    sku: 'string?', // optional
  },
});

console.log(result.extracted);
// { productName: "Widget", price: 29.99, features: [...], inStock: true }
```
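`extracted` is typed as `Record<string, unknown>`, so values should be narrowed at runtime before use. Continuing from the example above, a sketch with plain type guards:

```typescript
const extracted = result.extracted ?? {};

// Narrow unknown values before trusting them.
const price = typeof extracted.price === 'number' ? extracted.price : undefined;
const features = Array.isArray(extracted.features)
  ? extracted.features.filter((f): f is string => typeof f === 'string')
  : [];

if (price !== undefined) {
  console.log(`$${price.toFixed(2)}, ${features.length} features`);
}
```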
## Custom Extractors

Create custom extractors to add domain-specific extraction logic:

```typescript
import { scrape, type Extractor, type ExtractionContext } from 'scrapex';

const recipeExtractor: Extractor = {
  name: 'recipe',
  priority: 60, // Higher = runs earlier

  async extract(context: ExtractionContext) {
    const { $ } = context;

    return {
      custom: {
        ingredients: $('.ingredients li').map((_, el) => $(el).text()).get(),
        cookTime: $('[itemprop="cookTime"]').attr('content'),
        servings: $('[itemprop="recipeYield"]').text(),
      },
    };
  },
};

const result = await scrape('https://example.com/recipe', {
  extractors: [recipeExtractor],
});

console.log(result.custom?.ingredients);
```

### Replacing Default Extractors

```typescript
const result = await scrape(url, {
  replaceDefaultExtractors: true,
  extractors: [myCustomExtractor],
});
```

## Markdown Parsing

Parse markdown content:

```typescript
import { MarkdownParser, extractListLinks, groupByCategory } from 'scrapex/parsers';

// Parse any markdown
const parser = new MarkdownParser();
const result = parser.parse(markdownContent);

console.log(result.data.title);
console.log(result.data.sections);
console.log(result.data.links);
console.log(result.data.codeBlocks);

// Extract links from markdown lists and group by category
const links = extractListLinks(markdownContent);
const grouped = groupByCategory(links);

grouped.forEach((categoryLinks, category) => {
  console.log(`${category}: ${categoryLinks.length} links`);
});
```

### GitHub Utilities

```typescript
import {
  isGitHubRepo,
  parseGitHubUrl,
  toRawUrl,
} from 'scrapex/parsers';

isGitHubRepo('https://github.com/owner/repo');
// true

parseGitHubUrl('https://github.com/facebook/react');
// { owner: 'facebook', repo: 'react' }

toRawUrl('https://github.com/owner/repo');
// 'https://raw.githubusercontent.com/owner/repo/main/README.md'
```
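These compose with the markdown parser above. A sketch that pulls a repository README over its raw URL (note the `main`-branch assumption baked into `toRawUrl`) and inspects its structure:

```typescript
import { MarkdownParser, toRawUrl } from 'scrapex/parsers';

// Fetch the raw README for a repo, then parse its structure.
const rawUrl = toRawUrl('https://github.com/facebook/react');
const readme = await (await fetch(rawUrl)).text();

const parsed = new MarkdownParser().parse(readme);
console.log(parsed.data.title);
console.log(parsed.data.links.length);
```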
## URL Utilities

```typescript
import {
  isValidUrl,
  normalizeUrl,
  extractDomain,
  resolveUrl,
  isExternalUrl,
} from 'scrapex';

isValidUrl('https://example.com');
// true

normalizeUrl('https://example.com/page?utm_source=twitter');
// 'https://example.com/page' (tracking params removed)

extractDomain('https://www.example.com/path');
// 'example.com'

resolveUrl('/path', 'https://example.com/page');
// 'https://example.com/path'

isExternalUrl('https://other.com', 'example.com');
// true
```
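Combined with the `links` field from a scrape, these make quick work of outbound-link audits. A sketch, assuming each `ExtractedLink` exposes an `href` (check the exported type for the exact shape):

```typescript
import { scrape, extractDomain, isExternalUrl, normalizeUrl } from 'scrapex';

const result = await scrape('https://example.com/article');
const domain = extractDomain(result.url);

// Outbound links from the article body, with tracking params stripped.
// Assumes ExtractedLink has an `href` field.
const outbound = (result.links ?? [])
  .map((link) => normalizeUrl(link.href))
  .filter((href) => isExternalUrl(href, domain));

console.log(outbound);
```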
## Error Handling

```typescript
import { scrape, ScrapeError } from 'scrapex';

try {
  const result = await scrape('https://example.com');
} catch (error) {
  if (error instanceof ScrapeError) {
    console.log(error.code);          // 'FETCH_FAILED' | 'TIMEOUT' | 'INVALID_URL' | ...
    console.log(error.statusCode);    // HTTP status if available
    console.log(error.isRetryable()); // true for network errors
  }
}
```

Error codes:

- `FETCH_FAILED` - Network request failed
- `TIMEOUT` - Request timed out
- `INVALID_URL` - URL is malformed
- `BLOCKED` - Access denied (403)
- `NOT_FOUND` - Page not found (404)
- `ROBOTS_BLOCKED` - Blocked by robots.txt
- `PARSE_ERROR` - HTML parsing failed
- `LLM_ERROR` - LLM provider error
- `VALIDATION_ERROR` - Schema validation failed
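Because `isRetryable()` distinguishes transient failures from permanent ones, a retry wrapper is straightforward. A sketch (the helper is not part of scrapex):

```typescript
import { scrape, ScrapeError } from 'scrapex';

// Retry transient failures (network errors, timeouts) with linear backoff.
async function scrapeWithRetry(url: string, attempts = 3) {
  for (let attempt = 1; ; attempt++) {
    try {
      return await scrape(url);
    } catch (error) {
      const transient = error instanceof ScrapeError && error.isRetryable();
      if (!transient || attempt >= attempts) throw error;
      await new Promise((resolve) => setTimeout(resolve, 1000 * attempt));
    }
  }
}

const result = await scrapeWithRetry('https://example.com');
```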
## Robots.txt

```typescript
import { scrape, checkRobotsTxt } from 'scrapex';

// Check before scraping
const check = await checkRobotsTxt('https://example.com/path');
if (check.allowed) {
  const result = await scrape('https://example.com/path');
}

// Or let scrape() handle it
const result = await scrape('https://example.com/path', {
  respectRobots: true, // Throws if blocked
});
```

## Built-in Extractors

| Extractor | Priority | Description |
|-----------|----------|-------------|
| `MetaExtractor` | 100 | OG, Twitter, meta tags |
| `JsonLdExtractor` | 80 | JSON-LD structured data |
| `ContentExtractor` | 50 | Readability + Turndown |
| `FaviconExtractor` | 40 | Favicon discovery |
| `LinksExtractor` | 30 | Content link extraction |
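A custom extractor's `priority` slots it into this same ordering: `priority: 90`, for example, runs after `MetaExtractor` but before `JsonLdExtractor`. A minimal sketch reusing the `custom` field from the recipe example; the extractor name and selector are illustrative:

```typescript
import { scrape, type Extractor } from 'scrapex';

// priority 90: runs between MetaExtractor (100) and JsonLdExtractor (80).
const canonicalSniffer: Extractor = {
  name: 'canonical-sniffer',
  priority: 90,
  async extract({ $ }) {
    return { custom: { canonical: $('link[rel="canonical"]').attr('href') } };
  },
};

const result = await scrape('https://example.com', {
  extractors: [canonicalSniffer],
});
```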
## Configuration

### Options

```typescript
interface ScrapeOptions {
  timeout?: number;            // Default: 10000ms
  userAgent?: string;          // Custom user agent
  extractContent?: boolean;    // Default: true
  maxContentLength?: number;   // Default: 50000 chars
  fetcher?: Fetcher;           // Custom fetcher
  extractors?: Extractor[];    // Additional extractors
  replaceDefaultExtractors?: boolean;
  respectRobots?: boolean;     // Check robots.txt
  llm?: LLMProvider;           // LLM provider
  enhance?: EnhancementType[]; // LLM enhancements
  extract?: ExtractionSchema;  // Structured extraction
}
```
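The `fetcher` hook is where Puppeteer (from the optional peer dependencies) plugs in for JavaScript-rendered pages. The exact `Fetcher` signature is not shown in this README, so the sketch below assumes it is roughly `(url: string) => Promise<string>`; check the exported `Fetcher` type before relying on this:

```typescript
import puppeteer from 'puppeteer';
import { scrape } from 'scrapex';

// Assumed Fetcher shape: (url: string) => Promise<string>. Verify against
// the exported Fetcher type; this signature is a guess, not documented API.
async function puppeteerFetcher(url: string): Promise<string> {
  const browser = await puppeteer.launch();
  try {
    const page = await browser.newPage();
    await page.goto(url, { waitUntil: 'networkidle0' });
    return await page.content();
  } finally {
    await browser.close();
  }
}

const result = await scrape('https://example.com/spa-page', {
  fetcher: puppeteerFetcher,
});
```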
### Enhancement Types

```typescript
type EnhancementType =
  | 'summarize' // Generate summary
  | 'tags'      // Extract keywords/tags
  | 'entities'  // Extract named entities
  | 'classify'; // Classify content type
```

## Requirements

- Node.js 20+
- TypeScript 5.0+ (for type imports)

## License

MIT

## Author

Rakesh Paul - [binaryroute](https://binaryroute.com/authors/rk-paul/)