@librechat/agents 2.4.30 → 2.4.33

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as published.
Files changed (124)
  1. package/dist/cjs/common/enum.cjs +1 -0
  2. package/dist/cjs/common/enum.cjs.map +1 -1
  3. package/dist/cjs/events.cjs +3 -3
  4. package/dist/cjs/events.cjs.map +1 -1
  5. package/dist/cjs/graphs/Graph.cjs +2 -1
  6. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  7. package/dist/cjs/main.cjs +7 -2
  8. package/dist/cjs/main.cjs.map +1 -1
  9. package/dist/cjs/messages/ids.cjs +23 -0
  10. package/dist/cjs/messages/ids.cjs.map +1 -0
  11. package/dist/cjs/splitStream.cjs +2 -1
  12. package/dist/cjs/splitStream.cjs.map +1 -1
  13. package/dist/cjs/stream.cjs +87 -154
  14. package/dist/cjs/stream.cjs.map +1 -1
  15. package/dist/cjs/tools/ToolNode.cjs +14 -3
  16. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  17. package/dist/cjs/tools/handlers.cjs +144 -0
  18. package/dist/cjs/tools/handlers.cjs.map +1 -0
  19. package/dist/cjs/tools/search/content.cjs +140 -0
  20. package/dist/cjs/tools/search/content.cjs.map +1 -0
  21. package/dist/cjs/tools/search/firecrawl.cjs +131 -0
  22. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
  23. package/dist/cjs/tools/search/format.cjs +203 -0
  24. package/dist/cjs/tools/search/format.cjs.map +1 -0
  25. package/dist/cjs/tools/search/highlights.cjs +245 -0
  26. package/dist/cjs/tools/search/highlights.cjs.map +1 -0
  27. package/dist/cjs/tools/search/rerankers.cjs +194 -0
  28. package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
  29. package/dist/cjs/tools/search/schema.cjs +70 -0
  30. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  31. package/dist/cjs/tools/search/search.cjs +491 -0
  32. package/dist/cjs/tools/search/search.cjs.map +1 -0
  33. package/dist/cjs/tools/search/tool.cjs +292 -0
  34. package/dist/cjs/tools/search/tool.cjs.map +1 -0
  35. package/dist/cjs/tools/search/utils.cjs +66 -0
  36. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  37. package/dist/esm/common/enum.mjs +1 -0
  38. package/dist/esm/common/enum.mjs.map +1 -1
  39. package/dist/esm/events.mjs +1 -1
  40. package/dist/esm/events.mjs.map +1 -1
  41. package/dist/esm/graphs/Graph.mjs +2 -1
  42. package/dist/esm/graphs/Graph.mjs.map +1 -1
  43. package/dist/esm/main.mjs +4 -1
  44. package/dist/esm/main.mjs.map +1 -1
  45. package/dist/esm/messages/ids.mjs +21 -0
  46. package/dist/esm/messages/ids.mjs.map +1 -0
  47. package/dist/esm/splitStream.mjs +2 -1
  48. package/dist/esm/splitStream.mjs.map +1 -1
  49. package/dist/esm/stream.mjs +87 -152
  50. package/dist/esm/stream.mjs.map +1 -1
  51. package/dist/esm/tools/ToolNode.mjs +14 -3
  52. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  53. package/dist/esm/tools/handlers.mjs +141 -0
  54. package/dist/esm/tools/handlers.mjs.map +1 -0
  55. package/dist/esm/tools/search/content.mjs +119 -0
  56. package/dist/esm/tools/search/content.mjs.map +1 -0
  57. package/dist/esm/tools/search/firecrawl.mjs +128 -0
  58. package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
  59. package/dist/esm/tools/search/format.mjs +201 -0
  60. package/dist/esm/tools/search/format.mjs.map +1 -0
  61. package/dist/esm/tools/search/highlights.mjs +243 -0
  62. package/dist/esm/tools/search/highlights.mjs.map +1 -0
  63. package/dist/esm/tools/search/rerankers.mjs +188 -0
  64. package/dist/esm/tools/search/rerankers.mjs.map +1 -0
  65. package/dist/esm/tools/search/schema.mjs +61 -0
  66. package/dist/esm/tools/search/schema.mjs.map +1 -0
  67. package/dist/esm/tools/search/search.mjs +488 -0
  68. package/dist/esm/tools/search/search.mjs.map +1 -0
  69. package/dist/esm/tools/search/tool.mjs +290 -0
  70. package/dist/esm/tools/search/tool.mjs.map +1 -0
  71. package/dist/esm/tools/search/utils.mjs +61 -0
  72. package/dist/esm/tools/search/utils.mjs.map +1 -0
  73. package/dist/types/common/enum.d.ts +1 -0
  74. package/dist/types/graphs/Graph.d.ts +1 -1
  75. package/dist/types/index.d.ts +2 -0
  76. package/dist/types/messages/ids.d.ts +3 -0
  77. package/dist/types/messages/index.d.ts +1 -0
  78. package/dist/types/scripts/search.d.ts +1 -0
  79. package/dist/types/stream.d.ts +0 -8
  80. package/dist/types/tools/ToolNode.d.ts +6 -0
  81. package/dist/types/tools/example.d.ts +23 -3
  82. package/dist/types/tools/handlers.d.ts +8 -0
  83. package/dist/types/tools/search/content.d.ts +4 -0
  84. package/dist/types/tools/search/firecrawl.d.ts +38 -0
  85. package/dist/types/tools/search/format.d.ts +5 -0
  86. package/dist/types/tools/search/highlights.d.ts +13 -0
  87. package/dist/types/tools/search/index.d.ts +2 -0
  88. package/dist/types/tools/search/rerankers.d.ts +36 -0
  89. package/dist/types/tools/search/schema.d.ts +16 -0
  90. package/dist/types/tools/search/search.d.ts +9 -0
  91. package/dist/types/tools/search/test.d.ts +1 -0
  92. package/dist/types/tools/search/tool.d.ts +33 -0
  93. package/dist/types/tools/search/types.d.ts +540 -0
  94. package/dist/types/tools/search/utils.d.ts +10 -0
  95. package/package.json +10 -7
  96. package/src/common/enum.ts +1 -0
  97. package/src/events.ts +49 -15
  98. package/src/graphs/Graph.ts +6 -2
  99. package/src/index.ts +2 -0
  100. package/src/messages/ids.ts +26 -0
  101. package/src/messages/index.ts +1 -0
  102. package/src/scripts/search.ts +146 -0
  103. package/src/splitStream.test.ts +132 -71
  104. package/src/splitStream.ts +2 -1
  105. package/src/stream.ts +94 -183
  106. package/src/tools/ToolNode.ts +37 -14
  107. package/src/tools/handlers.ts +167 -0
  108. package/src/tools/search/content.test.ts +173 -0
  109. package/src/tools/search/content.ts +147 -0
  110. package/src/tools/search/firecrawl.ts +158 -0
  111. package/src/tools/search/format.ts +252 -0
  112. package/src/tools/search/highlights.ts +320 -0
  113. package/src/tools/search/index.ts +2 -0
  114. package/src/tools/search/output.md +2775 -0
  115. package/src/tools/search/rerankers.ts +269 -0
  116. package/src/tools/search/schema.ts +63 -0
  117. package/src/tools/search/search.ts +680 -0
  118. package/src/tools/search/test.html +884 -0
  119. package/src/tools/search/test.md +643 -0
  120. package/src/tools/search/test.ts +159 -0
  121. package/src/tools/search/tool.ts +427 -0
  122. package/src/tools/search/types.ts +621 -0
  123. package/src/tools/search/utils.ts +79 -0
  124. package/src/utils/llmConfig.ts +1 -1
package/src/tools/search/content.test.ts
@@ -0,0 +1,173 @@
+ /* eslint-disable @typescript-eslint/no-unused-vars */
+ /* eslint-disable no-console */
+ // content.test.ts
+ import * as fs from 'fs';
+ import { processContent } from './content';
+
+ describe('Link Processor', () => {
+ afterAll(() => {
+ if (fs.existsSync('./temp.html')) {
+ fs.unlinkSync('./temp.html');
+ }
+ if (fs.existsSync('./temp.md')) {
+ fs.unlinkSync('./temp.md');
+ }
+ });
+ // Basic functionality tests
+ test('should replace basic links with references', () => {
+ const html = `
+ <p>Test with <a href="https://example.com/link" title="Example">a link</a></p>
+ <p>And an <img src="https://example.com/img.jpg" alt="image"></p>
+ <p>Plus a <video src="https://example.com/video.mp4"></video></p>
+ `;
+
+ const markdown = `
+ Test with [a link](https://example.com/link "Example")
+ And an ![image](https://example.com/img.jpg)
+ Plus a [video](https://example.com/video.mp4)
+ `;
+
+ const result = processContent(html, markdown);
+
+ expect(result.links.length).toBe(1);
+ expect(result.images.length).toBe(1);
+ expect(result.videos.length).toBe(1);
+ expect(result.markdown).toContain('link#1');
+ expect(result.markdown).toContain('image#1');
+ expect(result.markdown).toContain('video#1');
+ });
+
+ // Edge case tests
+ test('should handle links with parentheses and special characters', () => {
+ const html = `
+ <a href="https://example.com/page(1).html" title="Parens">Link with parens</a>
+ <a href="https://example.com/path?query=test&param=value">Link with query</a>
+ `;
+
+ const markdown = `
+ [Link with parens](https://example.com/page(1).html "Parens")
+ [Link with query](https://example.com/path?query=test&param=value)
+ `;
+
+ const result = processContent(html, markdown);
+
+ expect(result.links.length).toBe(2);
+ expect(result.markdown).toContain('link#1');
+ expect(result.markdown).toContain('link#2');
+ });
+
+ // Performance test with large files
+ test('should process large files efficiently', () => {
+ const html = fs.readFileSync('src/tools/search/test.html', 'utf-8');
+ const markdown = fs.readFileSync('src/tools/search/test.md', 'utf-8');
+
+ // const largeHtml = generateLargeHtml(1000); // 1000 links
+ // fs.writeFileSync('./temp.html', largeHtml);
+
+ // const largeMd = generateLargeMarkdown(1000); // 1000 links
+ // fs.writeFileSync('./temp.md', largeMd);
+
+ // const html = fs.readFileSync('./temp.html', 'utf-8');
+ // const markdown = fs.readFileSync('./temp.md', 'utf-8');
+
+ // Measure time taken to process
+ const startTime = process.hrtime();
+ const result = processContent(html, markdown);
+ const elapsed = process.hrtime(startTime);
+ const timeInMs = elapsed[0] * 1000 + elapsed[1] / 1000000;
+
+ console.log(
+ `Processed ${result.links.length} links, ${result.images.length} images, and ${result.videos.length} videos in ${timeInMs.toFixed(2)}ms`
+ );
+
+ // Basic validations for large file processing
+ expect(result.links.length).toBeGreaterThan(0);
+ expect(result.markdown).toContain('link#');
+
+ // Check if all links were replaced (sample check)
+ expect(result.markdown).not.toContain('https://example.com/link');
+ });
+
+ // Memory usage test
+ test('should have reasonable memory usage', () => {
+ const html = fs.readFileSync('src/tools/search/test.html', 'utf-8');
+ const markdown = fs.readFileSync('src/tools/search/test.md', 'utf-8');
+
+ const beforeMem = process.memoryUsage();
+ processContent(html, markdown);
+ const afterMem = process.memoryUsage();
+
+ const heapUsed = (afterMem.heapUsed - beforeMem.heapUsed) / 1024 / 1024; // MB
+
+ console.log(`Memory used: ${heapUsed.toFixed(2)} MB`);
+
+ // This is a loose check - actual thresholds depend on your environment
+ expect(heapUsed).toBeLessThan(100); // Should use less than 100MB additional heap
+ });
+
+ // Real-world file test (if available)
+ test('should process real-world Wikipedia content', () => {
+ // Try to find real-world test files if they exist
+ const wikiHtml = 'src/tools/search/test.html';
+ const wikiMd = 'src/tools/search/test.md';
+
+ if (fs.existsSync(wikiHtml) && fs.existsSync(wikiMd)) {
+ const html = fs.readFileSync(wikiHtml, 'utf-8');
+ const markdown = fs.readFileSync(wikiMd, 'utf-8');
+
+ const result = processContent(html, markdown);
+
+ console.log(
+ `Processed ${result.links.length} Wikipedia links, ${result.images.length} images, and ${result.videos.length} videos`
+ );
+
+ expect(result.links.length).toBeGreaterThan(10); // Wikipedia articles typically have many links
+ expect(result.markdown).not.toMatch(/\]\(https?:\/\/[^\s")]+\)/); // No regular URLs should remain
+ } else {
+ console.log('Wikipedia test files not found, skipping this test');
+ }
+ });
+ });
+
+ // Helper function to generate large HTML test data
+ function generateLargeHtml(linkCount: number): string {
+ let html = '<html><body>';
+
+ for (let i = 1; i <= linkCount; i++) {
+ html += `<p>Paragraph ${i} with <a href="https://example.com/link${i}" title="Link ${i}">link ${i}</a>`;
+
+ if (i % 10 === 0) {
+ html += ` and <img src="https://example.com/image${i / 10}.jpg" alt="Image ${i / 10}">`;
+ }
+
+ if (i % 50 === 0) {
+ html += ` and <video src="https://example.com/video${i / 50}.mp4" title="Video ${i / 50}"></video>`;
+ }
+
+ html += '</p>';
+ }
+
+ html += '</body></html>';
+ return html;
+ }
+
+ /** Helper function to generate large Markdown test data */
+ function generateLargeMarkdown(linkCount: number): string {
+ let markdown = '# Test Document\n\n';
+
+ for (let i = 1; i <= linkCount; i++) {
+ markdown += `Paragraph ${i} with [link ${i}](https://example.com/link${i} "Link ${i}")`;
+
+ if (i % 10 === 0) {
+ markdown += ` and ![Image ${i / 10}](https://example.com/image${i / 10}.jpg)`;
+ }
+
+ if (i % 50 === 0) {
+ markdown += ` and [Video ${i / 50}](https://example.com/video${i / 50}.mp4 "Video ${i / 50}")`;
+ }
+
+ markdown += '\n\n';
+ }
+
+ return markdown;
+ }
package/src/tools/search/content.ts
@@ -0,0 +1,147 @@
+ import * as cheerio from 'cheerio';
+ import type { References, MediaReference } from './types';
+
+ export function processContent(
+ html: string,
+ markdown: string
+ ): {
+ markdown: string;
+ } & References {
+ const linkMap = new Map<string, MediaReference>();
+ const imageMap = new Map<string, MediaReference>();
+ const videoMap = new Map<string, MediaReference>();
+ const iframeMap = new Map<string, MediaReference>();
+
+ const $ = cheerio.load(html, {
+ xmlMode: false,
+ });
+
+ // Extract all media references
+ $('a[href]').each((_, el) => {
+ const href = $(el).attr('href');
+ if (href != null && href) {
+ linkMap.set(href, {
+ originalUrl: href,
+ title: $(el).attr('title'),
+ text: $(el).text().trim(),
+ });
+ }
+ });
+
+ $('img[src]').each((_, el) => {
+ const src = $(el).attr('src');
+ if (src != null && src) {
+ imageMap.set(src, {
+ originalUrl: src,
+ title: $(el).attr('alt') ?? $(el).attr('title'),
+ });
+ }
+ });
+
+ // Handle videos (dedicated video elements and video platforms in iframes)
+ $('video[src], iframe[src*="youtube"], iframe[src*="vimeo"]').each(
+ (_, el) => {
+ const src = $(el).attr('src');
+ if (src != null && src) {
+ videoMap.set(src, {
+ originalUrl: src,
+ title: $(el).attr('title'),
+ });
+ }
+ }
+ );
+
+ // Handle all other generic iframes that aren't already captured as videos
+ $('iframe').each((_, el) => {
+ const src = $(el).attr('src');
+ if (
+ src != null &&
+ src &&
+ !src.includes('youtube') &&
+ !src.includes('vimeo')
+ ) {
+ iframeMap.set(src, {
+ originalUrl: src,
+ title: $(el).attr('title'),
+ });
+ }
+ });
+
+ // Create lookup maps with indices
+ const linkIndexMap = new Map<string, number>();
+ const imageIndexMap = new Map<string, number>();
+ const videoIndexMap = new Map<string, number>();
+ const iframeIndexMap = new Map<string, number>();
+
+ Array.from(linkMap.keys()).forEach((url, i) => linkIndexMap.set(url, i + 1));
+ Array.from(imageMap.keys()).forEach((url, i) =>
+ imageIndexMap.set(url, i + 1)
+ );
+ Array.from(videoMap.keys()).forEach((url, i) =>
+ videoIndexMap.set(url, i + 1)
+ );
+ Array.from(iframeMap.keys()).forEach((url, i) =>
+ iframeIndexMap.set(url, i + 1)
+ );
+
+ // Process the markdown
+ let result = markdown;
+
+ // Replace each URL one by one, starting with the longest URLs first to avoid partial matches
+ const allUrls = [
+ ...Array.from(imageMap.keys()).map((url) => ({
+ url,
+ type: 'image',
+ idx: imageIndexMap.get(url),
+ })),
+ ...Array.from(videoMap.keys()).map((url) => ({
+ url,
+ type: 'video',
+ idx: videoIndexMap.get(url),
+ })),
+ ...Array.from(iframeMap.keys()).map((url) => ({
+ url,
+ type: 'iframe',
+ idx: iframeIndexMap.get(url),
+ })),
+ ...Array.from(linkMap.keys()).map((url) => ({
+ url,
+ type: 'link',
+ idx: linkIndexMap.get(url),
+ })),
+ ].sort((a, b) => b.url.length - a.url.length);
+
+ // Create a function to escape special characters in URLs for regex
+ function escapeRegex(string: string): string {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+ }
+
+ // Replace each URL in the markdown
+ for (const { url, type, idx } of allUrls) {
+ // Create a regex that captures URLs in markdown links
+ const regex = new RegExp(`\\(${escapeRegex(url)}(?:\\s+"[^"]*")?\\)`, 'g');
+
+ result = result.replace(regex, (match) => {
+ // Keep any title attribute that might exist
+ const titleMatch = match.match(/\s+"([^"]*)"/);
+ const titlePart = titleMatch ? ` "${titleMatch[1]}"` : '';
+
+ return `(${type}#${idx}${titlePart})`;
+ });
+ }
+
+ iframeMap.clear();
+ const links = Array.from(linkMap.values());
+ linkMap.clear();
+ const images = Array.from(imageMap.values());
+ imageMap.clear();
+ const videos = Array.from(videoMap.values());
+ videoMap.clear();
+
+ return {
+ markdown: result,
+ links,
+ images,
+ videos,
+ };
+ }
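
For orientation, a minimal usage sketch of the new processContent helper follows; the sketch and its sample strings are illustrative and not part of the published diff. It shows link targets being rewritten into indexed references (link#1, image#1, ...) while the extracted media is returned alongside the rewritten markdown.

import { processContent } from './content';

const html =
  '<p>See <a href="https://example.com/docs" title="Docs">the docs</a></p>';
const markdown = 'See [the docs](https://example.com/docs "Docs")';

// The URL is swapped for an indexed reference; the title attribute is kept.
const { markdown: rewritten, links, images, videos } = processContent(html, markdown);
console.log(rewritten); // See [the docs](link#1 "Docs")
console.log(links);     // [{ originalUrl: 'https://example.com/docs', title: 'Docs', text: 'the docs' }]
console.log(images.length, videos.length); // 0 0
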
package/src/tools/search/firecrawl.ts
@@ -0,0 +1,158 @@
+ import axios from 'axios';
+ import { processContent } from './content';
+ import type * as t from './types';
+ import { createDefaultLogger } from './utils';
+
+ /**
+ * Firecrawl scraper implementation
+ * Uses the Firecrawl API to scrape web pages
+ */
+ export class FirecrawlScraper {
+ private apiKey: string;
+ private apiUrl: string;
+ private defaultFormats: string[];
+ private timeout: number;
+ private logger: t.Logger;
+
+ constructor(config: t.FirecrawlScraperConfig = {}) {
+ this.apiKey = config.apiKey ?? process.env.FIRECRAWL_API_KEY ?? '';
+
+ const baseUrl =
+ config.apiUrl ??
+ process.env.FIRECRAWL_BASE_URL ??
+ 'https://api.firecrawl.dev';
+ this.apiUrl = `${baseUrl.replace(/\/+$/, '')}/v1/scrape`;
+
+ this.defaultFormats = config.formats ?? ['markdown', 'html'];
+ this.timeout = config.timeout ?? 15000;
+
+ this.logger = config.logger || createDefaultLogger();
+
+ if (!this.apiKey) {
+ this.logger.warn('FIRECRAWL_API_KEY is not set. Scraping will not work.');
+ }
+
+ this.logger.debug(
+ `Firecrawl scraper initialized with API URL: ${this.apiUrl}`
+ );
+ }
+
+ /**
+ * Scrape a single URL
+ * @param url URL to scrape
+ * @param options Scrape options
+ * @returns Scrape response
+ */
+ async scrapeUrl(
+ url: string,
+ options: t.FirecrawlScrapeOptions = {}
+ ): Promise<[string, t.FirecrawlScrapeResponse]> {
+ if (!this.apiKey) {
+ return [
+ url,
+ {
+ success: false,
+ error: 'FIRECRAWL_API_KEY is not set',
+ },
+ ];
+ }
+
+ try {
+ const response = await axios.post(
+ this.apiUrl,
+ {
+ url,
+ formats: options.formats || this.defaultFormats,
+ includeTags: options.includeTags,
+ excludeTags: options.excludeTags,
+ headers: options.headers,
+ waitFor: options.waitFor,
+ timeout: options.timeout ?? this.timeout,
+ },
+ {
+ headers: {
+ 'Content-Type': 'application/json',
+ Authorization: `Bearer ${this.apiKey}`,
+ },
+ timeout: this.timeout,
+ }
+ );
+
+ return [url, response.data];
+ } catch (error) {
+ const errorMessage =
+ error instanceof Error ? error.message : String(error);
+ return [
+ url,
+ {
+ success: false,
+ error: `Firecrawl API request failed: ${errorMessage}`,
+ },
+ ];
+ }
+ }
+
+ /**
+ * Extract content from scrape response
+ * @param response Scrape response
+ * @returns Extracted content or empty string if not available
+ */
+ extractContent(
+ response: t.FirecrawlScrapeResponse
+ ): [string, undefined | t.References] {
+ if (!response.success || !response.data) {
+ return ['', undefined];
+ }
+
+ if (response.data.markdown != null && response.data.html != null) {
+ try {
+ const { markdown, ...rest } = processContent(
+ response.data.html,
+ response.data.markdown
+ );
+ return [markdown, rest];
+ } catch (error) {
+ this.logger.error('Error processing content:', error);
+ return [response.data.markdown, undefined];
+ }
+ } else if (response.data.markdown != null) {
+ return [response.data.markdown, undefined];
+ }
+
+ // Fall back to HTML content
+ if (response.data.html != null) {
+ return [response.data.html, undefined];
+ }
+
+ // Fall back to raw HTML content
+ if (response.data.rawHtml != null) {
+ return [response.data.rawHtml, undefined];
+ }
+
+ return ['', undefined];
+ }
+
+ /**
+ * Extract metadata from scrape response
+ * @param response Scrape response
+ * @returns Metadata object
+ */
+ extractMetadata(response: t.FirecrawlScrapeResponse): t.ScrapeMetadata {
+ if (!response.success || !response.data || !response.data.metadata) {
+ return {};
+ }
+
+ return response.data.metadata;
+ }
+ }
+
+ /**
+ * Create a Firecrawl scraper instance
+ * @param config Scraper configuration
+ * @returns Firecrawl scraper instance
+ */
+ export const createFirecrawlScraper = (
+ config: t.FirecrawlScraperConfig = {}
+ ): FirecrawlScraper => {
+ return new FirecrawlScraper(config);
+ };
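
Taken together with content.ts, the new scraper can be exercised roughly as follows. This is a hedged sketch rather than an example shipped with the package: the target URL is illustrative, and it assumes FIRECRAWL_API_KEY is set in the environment.

import { createFirecrawlScraper } from './firecrawl';

async function main(): Promise<void> {
  // Picks up FIRECRAWL_API_KEY / FIRECRAWL_BASE_URL from the environment by default.
  const scraper = createFirecrawlScraper({ formats: ['markdown', 'html'] });

  const [url, response] = await scraper.scrapeUrl('https://example.com');
  if (!response.success) {
    console.error(`Scrape failed for ${url}: ${response.error}`);
    return;
  }

  // extractContent prefers markdown + html (run through processContent),
  // then falls back to markdown, html, or rawHtml.
  const [content, references] = scraper.extractContent(response);
  const metadata = scraper.extractMetadata(response);

  console.log(content.slice(0, 200));
  console.log(references?.links.length ?? 0, 'links extracted');
  console.log(metadata);
}

main().catch(console.error);
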