firecrawl-mcp 1.9.0 → 1.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +0 -0
- package/README.md +319 -63
- package/dist/index.js +199 -14
- package/dist/index.test.js +0 -0
- package/dist/jest.setup.js +58 -0
- package/dist/src/index.js +1053 -0
- package/dist/src/index.test.js +225 -0
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -10,9 +10,25 @@ dotenv.config();
 // Tool definitions
 const SCRAPE_TOOL = {
   name: 'firecrawl_scrape',
-  description:
-
-
+  description: `
+Scrape content from a single URL with advanced options.
+
+**Best for:** Single page content extraction, when you know exactly which page contains the information.
+**Not recommended for:** Multiple pages (use batch_scrape), unknown page (use search), structured data (use extract).
+**Common mistakes:** Using scrape for a list of URLs (use batch_scrape instead).
+**Prompt Example:** "Get the content of the page at https://example.com."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_scrape",
+  "arguments": {
+    "url": "https://example.com",
+    "formats": ["markdown"]
+  }
+}
+\`\`\`
+**Returns:** Markdown, HTML, or other formats as specified.
+`,
   inputSchema: {
     type: 'object',
     properties: {
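For context, here is how the JSON "Usage Example" embedded in the new description maps onto an actual client call. This is a minimal sketch assuming the @modelcontextprotocol/sdk client API and a stdio launch of the server via npx; the client name and API key are placeholders, not taken from this package.

```js
// Sketch only: invoking firecrawl_scrape from an MCP client.
// Assumes @modelcontextprotocol/sdk; command/env values are placeholders.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

const client = new Client({ name: 'example-client', version: '1.0.0' });
await client.connect(
  new StdioClientTransport({
    command: 'npx',
    args: ['-y', 'firecrawl-mcp'],
    env: { FIRECRAWL_API_KEY: 'fc-YOUR_API_KEY' },
  })
);

// Same payload as the "Usage Example" in the description above.
const result = await client.callTool({
  name: 'firecrawl_scrape',
  arguments: { url: 'https://example.com', formats: ['markdown'] },
});
console.log(result.content);
```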
@@ -163,7 +179,24 @@ const SCRAPE_TOOL = {
 };
 const MAP_TOOL = {
   name: 'firecrawl_map',
-  description:
+  description: `
+Map a website to discover all indexed URLs on the site.
+
+**Best for:** Discovering URLs on a website before deciding what to scrape; finding specific sections of a website.
+**Not recommended for:** When you already know which specific URL you need (use scrape or batch_scrape); when you need the content of the pages (use scrape after mapping).
+**Common mistakes:** Using crawl to discover URLs instead of map.
+**Prompt Example:** "List all URLs on example.com."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_map",
+  "arguments": {
+    "url": "https://example.com"
+  }
+}
+\`\`\`
+**Returns:** Array of URLs found on the site.
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -197,8 +230,29 @@ const MAP_TOOL = {
 };
 const CRAWL_TOOL = {
   name: 'firecrawl_crawl',
-  description:
-
+  description: `
+Starts an asynchronous crawl job on a website and extracts content from all pages.
+
+**Best for:** Extracting content from multiple related pages, when you need comprehensive coverage.
+**Not recommended for:** Extracting content from a single page (use scrape); when token limits are a concern (use map + batch_scrape); when you need fast results (crawling can be slow).
+**Warning:** Crawl responses can be very large and may exceed token limits. Limit the crawl depth and number of pages, or use map + batch_scrape for better control.
+**Common mistakes:** Setting limit or maxDepth too high (causes token overflow); using crawl for a single page (use scrape instead).
+**Prompt Example:** "Get all blog posts from the first two levels of example.com/blog."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_crawl",
+  "arguments": {
+    "url": "https://example.com/blog/*",
+    "maxDepth": 2,
+    "limit": 100,
+    "allowExternalLinks": false,
+    "deduplicateSimilarURLs": true
+  }
+}
+\`\`\`
+**Returns:** Operation ID for status checking; use firecrawl_check_crawl_status to check progress.
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -307,7 +361,20 @@ const CRAWL_TOOL = {
 };
 const CHECK_CRAWL_STATUS_TOOL = {
   name: 'firecrawl_check_crawl_status',
-  description:
+  description: `
+Check the status of a crawl job.
+
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_check_crawl_status",
+  "arguments": {
+    "id": "550e8400-e29b-41d4-a716-446655440000"
+  }
+}
+\`\`\`
+**Returns:** Status and progress of the crawl job, including results if available.
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -321,8 +388,31 @@ const CHECK_CRAWL_STATUS_TOOL = {
 };
 const SEARCH_TOOL = {
   name: 'firecrawl_search',
-  description:
-
+  description: `
+Search the web and optionally extract content from search results.
+
+**Best for:** Finding specific information across multiple websites, when you don't know which website has the information; when you need the most relevant content for a query.
+**Not recommended for:** When you already know which website to scrape (use scrape); when you need comprehensive coverage of a single website (use map or crawl).
+**Common mistakes:** Using crawl or map for open-ended questions (use search instead).
+**Prompt Example:** "Find the latest research papers on AI published in 2023."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_search",
+  "arguments": {
+    "query": "latest AI research papers 2023",
+    "limit": 5,
+    "lang": "en",
+    "country": "us",
+    "scrapeOptions": {
+      "formats": ["markdown"],
+      "onlyMainContent": true
+    }
+  }
+}
+\`\`\`
+**Returns:** Array of search results (with optional scraped content).
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -393,8 +483,45 @@ const SEARCH_TOOL = {
 };
 const EXTRACT_TOOL = {
   name: 'firecrawl_extract',
-  description:
-
+  description: `
+Extract structured information from web pages using LLM capabilities. Supports both cloud AI and self-hosted LLM extraction.
+
+**Best for:** Extracting specific structured data like prices, names, details.
+**Not recommended for:** When you need the full content of a page (use scrape); when you're not looking for specific structured data.
+**Arguments:**
+- urls: Array of URLs to extract information from
+- prompt: Custom prompt for the LLM extraction
+- systemPrompt: System prompt to guide the LLM
+- schema: JSON schema for structured data extraction
+- allowExternalLinks: Allow extraction from external links
+- enableWebSearch: Enable web search for additional context
+- includeSubdomains: Include subdomains in extraction
+**Prompt Example:** "Extract the product name, price, and description from these product pages."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_extract",
+  "arguments": {
+    "urls": ["https://example.com/page1", "https://example.com/page2"],
+    "prompt": "Extract product information including name, price, and description",
+    "systemPrompt": "You are a helpful assistant that extracts product information",
+    "schema": {
+      "type": "object",
+      "properties": {
+        "name": { "type": "string" },
+        "price": { "type": "number" },
+        "description": { "type": "string" }
+      },
+      "required": ["name", "price"]
+    },
+    "allowExternalLinks": false,
+    "enableWebSearch": false,
+    "includeSubdomains": false
+  }
+}
+\`\`\`
+**Returns:** Extracted structured data as defined by your schema.
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -433,7 +560,31 @@ const EXTRACT_TOOL = {
 };
 const DEEP_RESEARCH_TOOL = {
   name: 'firecrawl_deep_research',
-  description:
+  description: `
+Conduct deep web research on a query using intelligent crawling, search, and LLM analysis.
+
+**Best for:** Complex research questions requiring multiple sources, in-depth analysis.
+**Not recommended for:** Simple questions that can be answered with a single search; when you need very specific information from a known page (use scrape); when you need results quickly (deep research can take time).
+**Arguments:**
+- query (string, required): The research question or topic to explore.
+- maxDepth (number, optional): Maximum recursive depth for crawling/search (default: 3).
+- timeLimit (number, optional): Time limit in seconds for the research session (default: 120).
+- maxUrls (number, optional): Maximum number of URLs to analyze (default: 50).
+**Prompt Example:** "Research the environmental impact of electric vehicles versus gasoline vehicles."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_deep_research",
+  "arguments": {
+    "query": "What are the environmental impacts of electric vehicles compared to gasoline vehicles?",
+    "maxDepth": 3,
+    "timeLimit": 120,
+    "maxUrls": 50
+  }
+}
+\`\`\`
+**Returns:** Final analysis generated by an LLM based on research (data.finalAnalysis); may also include structured activities and sources used in the research process.
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -459,7 +610,29 @@ const DEEP_RESEARCH_TOOL = {
 };
 const GENERATE_LLMSTXT_TOOL = {
   name: 'firecrawl_generate_llmstxt',
-  description:
+  description: `
+Generate a standardized llms.txt (and optionally llms-full.txt) file for a given domain. This file defines how large language models should interact with the site.
+
+**Best for:** Creating machine-readable permission guidelines for AI models.
+**Not recommended for:** General content extraction or research.
+**Arguments:**
+- url (string, required): The base URL of the website to analyze.
+- maxUrls (number, optional): Max number of URLs to include (default: 10).
+- showFullText (boolean, optional): Whether to include llms-full.txt contents in the response.
+**Prompt Example:** "Generate an LLMs.txt file for example.com."
+**Usage Example:**
+\`\`\`json
+{
+  "name": "firecrawl_generate_llmstxt",
+  "arguments": {
+    "url": "https://example.com",
+    "maxUrls": 20,
+    "showFullText": true
+  }
+}
+\`\`\`
+**Returns:** LLMs.txt file contents (and optionally llms-full.txt).
+`,
   inputSchema: {
     type: 'object',
     properties: {
@@ -725,7 +898,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
         content: [
           {
             type: 'text',
-            text: trimResponseText(`Started crawl for ${url} with job ID: ${response.id}`),
+            text: trimResponseText(`Started crawl for ${url} with job ID: ${response.id}. Use firecrawl_check_crawl_status to check progress.`),
           },
         ],
         isError: false,
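The reworded message spells out the intended two-step flow: firecrawl_crawl returns only a job ID, and the caller polls for results. A sketch of the follow-up call, with a placeholder ID:

```json
{
  "name": "firecrawl_check_crawl_status",
  "arguments": {
    "id": "<job ID from the firecrawl_crawl response>"
  }
}
```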
@@ -1021,6 +1194,18 @@ async function runSSELocalServer() {
       transport.handlePostMessage(req, res);
     }
   });
+  const PORT = process.env.PORT || 3000;
+  console.log('Starting server on port', PORT);
+  try {
+    app.listen(PORT, () => {
+      console.log(`MCP SSE Server listening on http://localhost:${PORT}`);
+      console.log(`SSE endpoint: http://localhost:${PORT}/sse`);
+      console.log(`Message endpoint: http://localhost:${PORT}/messages`);
+    });
+  }
+  catch (error) {
+    console.error('Error starting server:', error);
+  }
 }
 async function runSSECloudServer() {
   const transports = {};
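Below is a minimal sketch of a client connecting to the new local SSE endpoint, assuming the @modelcontextprotocol/sdk client API and the default port 3000 chosen above. Note that the added try/catch only captures synchronous throws from app.listen; bind failures such as EADDRINUSE are emitted asynchronously as an 'error' event on the returned server.

```js
// Sketch only: connecting to the local SSE endpoint started above.
// Assumes @modelcontextprotocol/sdk; port 3000 is the default from PORT.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';

const transport = new SSEClientTransport(new URL('http://localhost:3000/sse'));
const client = new Client({ name: 'sse-example', version: '1.0.0' });
await client.connect(transport);

// List the tools defined earlier in this file (scrape, map, crawl, ...).
console.log(await client.listTools());
```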
package/dist/index.test.js
CHANGED
File without changes
package/dist/jest.setup.js
ADDED
@@ -0,0 +1,58 @@
+import { jest } from '@jest/globals';
+// Set test timeout
+jest.setTimeout(30000);
+// Create mock responses
+const mockSearchResponse = {
+  success: true,
+  data: [
+    {
+      url: 'https://example.com',
+      title: 'Test Page',
+      description: 'Test Description',
+      markdown: '# Test Content',
+      actions: null,
+    },
+  ],
+};
+const mockBatchScrapeResponse = {
+  success: true,
+  id: 'test-batch-id',
+};
+const mockBatchStatusResponse = {
+  success: true,
+  status: 'completed',
+  completed: 1,
+  total: 1,
+  creditsUsed: 1,
+  expiresAt: new Date(),
+  data: [
+    {
+      url: 'https://example.com',
+      title: 'Test Page',
+      description: 'Test Description',
+      markdown: '# Test Content',
+      actions: null,
+    },
+  ],
+};
+// Create mock instance methods
+const mockSearch = jest.fn().mockImplementation(async () => mockSearchResponse);
+const mockAsyncBatchScrapeUrls = jest
+  .fn()
+  .mockImplementation(async () => mockBatchScrapeResponse);
+const mockCheckBatchScrapeStatus = jest
+  .fn()
+  .mockImplementation(async () => mockBatchStatusResponse);
+// Create mock instance
+const mockInstance = {
+  apiKey: 'test-api-key',
+  apiUrl: 'test-api-url',
+  search: mockSearch,
+  asyncBatchScrapeUrls: mockAsyncBatchScrapeUrls,
+  checkBatchScrapeStatus: mockCheckBatchScrapeStatus,
+};
+// Mock the module
+jest.mock('@mendable/firecrawl-js', () => ({
+  __esModule: true,
+  default: jest.fn().mockImplementation(() => mockInstance),
+}));
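For context, a hypothetical test consuming these mocks might look like the sketch below; the test name and assertions are illustrative and not part of the package.

```js
// Hypothetical test relying on the jest.setup.js mocks above.
import FirecrawlApp from '@mendable/firecrawl-js';

test('search resolves with the mocked response', async () => {
  // jest.mock() above swaps the default export for a factory that
  // returns mockInstance, so this constructor yields the mock.
  const app = new FirecrawlApp({ apiKey: 'test-api-key' });
  const result = await app.search('test query');
  expect(result.success).toBe(true);
  expect(result.data[0].markdown).toBe('# Test Content');
});
```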