@librechat/agents 2.4.317 → 2.4.319

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/dist/cjs/events.cjs +3 -3
  2. package/dist/cjs/events.cjs.map +1 -1
  3. package/dist/cjs/main.cjs +5 -2
  4. package/dist/cjs/main.cjs.map +1 -1
  5. package/dist/cjs/messages/ids.cjs +23 -0
  6. package/dist/cjs/messages/ids.cjs.map +1 -0
  7. package/dist/cjs/stream.cjs +8 -155
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/cjs/tools/handlers.cjs +144 -0
  10. package/dist/cjs/tools/handlers.cjs.map +1 -0
  11. package/dist/cjs/tools/search/content.cjs +140 -0
  12. package/dist/cjs/tools/search/content.cjs.map +1 -0
  13. package/dist/cjs/tools/search/firecrawl.cjs +17 -37
  14. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  15. package/dist/cjs/tools/search/format.cjs +79 -29
  16. package/dist/cjs/tools/search/format.cjs.map +1 -1
  17. package/dist/cjs/tools/search/highlights.cjs +64 -13
  18. package/dist/cjs/tools/search/highlights.cjs.map +1 -1
  19. package/dist/cjs/tools/search/search.cjs +13 -15
  20. package/dist/cjs/tools/search/search.cjs.map +1 -1
  21. package/dist/cjs/tools/search/tool.cjs +42 -12
  22. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  23. package/dist/cjs/tools/search/utils.cjs +35 -0
  24. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  25. package/dist/esm/events.mjs +1 -1
  26. package/dist/esm/events.mjs.map +1 -1
  27. package/dist/esm/main.mjs +3 -1
  28. package/dist/esm/main.mjs.map +1 -1
  29. package/dist/esm/messages/ids.mjs +21 -0
  30. package/dist/esm/messages/ids.mjs.map +1 -0
  31. package/dist/esm/stream.mjs +7 -152
  32. package/dist/esm/stream.mjs.map +1 -1
  33. package/dist/esm/tools/handlers.mjs +141 -0
  34. package/dist/esm/tools/handlers.mjs.map +1 -0
  35. package/dist/esm/tools/search/content.mjs +119 -0
  36. package/dist/esm/tools/search/content.mjs.map +1 -0
  37. package/dist/esm/tools/search/firecrawl.mjs +18 -37
  38. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  39. package/dist/esm/tools/search/format.mjs +79 -29
  40. package/dist/esm/tools/search/format.mjs.map +1 -1
  41. package/dist/esm/tools/search/highlights.mjs +64 -13
  42. package/dist/esm/tools/search/highlights.mjs.map +1 -1
  43. package/dist/esm/tools/search/search.mjs +12 -14
  44. package/dist/esm/tools/search/search.mjs.map +1 -1
  45. package/dist/esm/tools/search/tool.mjs +42 -12
  46. package/dist/esm/tools/search/tool.mjs.map +1 -1
  47. package/dist/esm/tools/search/utils.mjs +32 -0
  48. package/dist/esm/tools/search/utils.mjs.map +1 -0
  49. package/dist/types/index.d.ts +1 -0
  50. package/dist/types/messages/ids.d.ts +3 -0
  51. package/dist/types/messages/index.d.ts +1 -0
  52. package/dist/types/stream.d.ts +0 -8
  53. package/dist/types/tools/handlers.d.ts +8 -0
  54. package/dist/types/tools/search/content.d.ts +4 -0
  55. package/dist/types/tools/search/firecrawl.d.ts +6 -86
  56. package/dist/types/tools/search/format.d.ts +4 -1
  57. package/dist/types/tools/search/highlights.d.ts +1 -1
  58. package/dist/types/tools/search/search.d.ts +1 -1
  59. package/dist/types/tools/search/test.d.ts +1 -0
  60. package/dist/types/tools/search/tool.d.ts +12 -4
  61. package/dist/types/tools/search/types.d.ts +388 -53
  62. package/dist/types/tools/search/utils.d.ts +3 -0
  63. package/package.json +2 -1
  64. package/src/events.ts +49 -15
  65. package/src/index.ts +1 -0
  66. package/src/messages/ids.ts +26 -0
  67. package/src/messages/index.ts +1 -0
  68. package/src/scripts/search.ts +5 -3
  69. package/src/stream.ts +4 -186
  70. package/src/tools/handlers.ts +167 -0
  71. package/src/tools/search/content.test.ts +173 -0
  72. package/src/tools/search/content.ts +147 -0
  73. package/src/tools/search/firecrawl.ts +27 -144
  74. package/src/tools/search/format.ts +89 -31
  75. package/src/tools/search/highlights.ts +99 -17
  76. package/src/tools/search/output.md +2775 -0
  77. package/src/tools/search/search.ts +42 -54
  78. package/src/tools/search/test.html +884 -0
  79. package/src/tools/search/test.md +643 -0
  80. package/src/tools/search/test.ts +159 -0
  81. package/src/tools/search/tool.ts +52 -15
  82. package/src/tools/search/types.ts +439 -61
  83. package/src/tools/search/utils.ts +43 -0
package/src/tools/search/content.test.ts
@@ -0,0 +1,173 @@
+ /* eslint-disable @typescript-eslint/no-unused-vars */
+ /* eslint-disable no-console */
+ // content.test.ts
+ import * as fs from 'fs';
+ import { processContent } from './content';
+
+ describe('Link Processor', () => {
+   afterAll(() => {
+     if (fs.existsSync('./temp.html')) {
+       fs.unlinkSync('./temp.html');
+     }
+     if (fs.existsSync('./temp.md')) {
+       fs.unlinkSync('./temp.md');
+     }
+   });
+   // Basic functionality tests
+   test('should replace basic links with references', () => {
+     const html = `
+       <p>Test with <a href="https://example.com/link" title="Example">a link</a></p>
+       <p>And an <img src="https://example.com/img.jpg" alt="image"></p>
+       <p>Plus a <video src="https://example.com/video.mp4"></video></p>
+     `;
+
+     const markdown = `
+       Test with [a link](https://example.com/link "Example")
+       And an ![image](https://example.com/img.jpg)
+       Plus a [video](https://example.com/video.mp4)
+     `;
+
+     const result = processContent(html, markdown);
+
+     expect(result.links.length).toBe(1);
+     expect(result.images.length).toBe(1);
+     expect(result.videos.length).toBe(1);
+     expect(result.markdown).toContain('link#1');
+     expect(result.markdown).toContain('image#1');
+     expect(result.markdown).toContain('video#1');
+   });
+
+   // Edge case tests
+   test('should handle links with parentheses and special characters', () => {
+     const html = `
+       <a href="https://example.com/page(1).html" title="Parens">Link with parens</a>
+       <a href="https://example.com/path?query=test&param=value">Link with query</a>
+     `;
+
+     const markdown = `
+       [Link with parens](https://example.com/page(1).html "Parens")
+       [Link with query](https://example.com/path?query=test&param=value)
+     `;
+
+     const result = processContent(html, markdown);
+
+     expect(result.links.length).toBe(2);
+     expect(result.markdown).toContain('link#1');
+     expect(result.markdown).toContain('link#2');
+   });
+
+   // Performance test with large files
+   test('should process large files efficiently', () => {
+     const html = fs.readFileSync('src/tools/search/test.html', 'utf-8');
+     const markdown = fs.readFileSync('src/tools/search/test.md', 'utf-8');
+
+     // const largeHtml = generateLargeHtml(1000); // 1000 links
+     // fs.writeFileSync('./temp.html', largeHtml);
+
+     // const largeMd = generateLargeMarkdown(1000); // 1000 links
+     // fs.writeFileSync('./temp.md', largeMd);
+
+     // const html = fs.readFileSync('./temp.html', 'utf-8');
+     // const markdown = fs.readFileSync('./temp.md', 'utf-8');
+
+     // Measure time taken to process
+     const startTime = process.hrtime();
+     const result = processContent(html, markdown);
+     const elapsed = process.hrtime(startTime);
+     const timeInMs = elapsed[0] * 1000 + elapsed[1] / 1000000;
+
+     console.log(
+       `Processed ${result.links.length} links, ${result.images.length} images, and ${result.videos.length} videos in ${timeInMs.toFixed(2)}ms`
+     );
+
+     // Basic validations for large file processing
+     expect(result.links.length).toBeGreaterThan(0);
+     expect(result.markdown).toContain('link#');
+
+     // Check if all links were replaced (sample check)
+     expect(result.markdown).not.toContain('https://example.com/link');
+   });
+
+   // Memory usage test
+   test('should have reasonable memory usage', () => {
+     const html = fs.readFileSync('src/tools/search/test.html', 'utf-8');
+     const markdown = fs.readFileSync('src/tools/search/test.md', 'utf-8');
+
+     const beforeMem = process.memoryUsage();
+     processContent(html, markdown);
+     const afterMem = process.memoryUsage();
+
+     const heapUsed = (afterMem.heapUsed - beforeMem.heapUsed) / 1024 / 1024; // MB
+
+     console.log(`Memory used: ${heapUsed.toFixed(2)} MB`);
+
+     // This is a loose check - actual thresholds depend on your environment
+     expect(heapUsed).toBeLessThan(100); // Should use less than 100MB additional heap
+   });
+
+   // Real-world file test (if available)
+   test('should process real-world Wikipedia content', () => {
+     // Try to find real-world test files if they exist
+     const wikiHtml = 'src/tools/search/test.html';
+     const wikiMd = 'src/tools/search/test.md';
+
+     if (fs.existsSync(wikiHtml) && fs.existsSync(wikiMd)) {
+       const html = fs.readFileSync(wikiHtml, 'utf-8');
+       const markdown = fs.readFileSync(wikiMd, 'utf-8');
+
+       const result = processContent(html, markdown);
+
+       console.log(
+         `Processed ${result.links.length} Wikipedia links, ${result.images.length} images, and ${result.videos.length} videos`
+       );
+
+       expect(result.links.length).toBeGreaterThan(10); // Wikipedia articles typically have many links
+       expect(result.markdown).not.toMatch(/\]\(https?:\/\/[^\s")]+\)/); // No regular URLs should remain
+     } else {
+       console.log('Wikipedia test files not found, skipping this test');
+     }
+   });
+ });
+
+ // Helper function to generate large HTML test data
+ function generateLargeHtml(linkCount: number): string {
+   let html = '<html><body>';
+
+   for (let i = 1; i <= linkCount; i++) {
+     html += `<p>Paragraph ${i} with <a href="https://example.com/link${i}" title="Link ${i}">link ${i}</a>`;
+
+     if (i % 10 === 0) {
+       html += ` and <img src="https://example.com/image${i / 10}.jpg" alt="Image ${i / 10}">`;
+     }
+
+     if (i % 50 === 0) {
+       html += ` and <video src="https://example.com/video${i / 50}.mp4" title="Video ${i / 50}"></video>`;
+     }
+
+     html += '</p>';
+   }
+
+   html += '</body></html>';
+   return html;
+ }
+
+ /** Helper function to generate large Markdown test data */
+ function generateLargeMarkdown(linkCount: number): string {
+   let markdown = '# Test Document\n\n';
+
+   for (let i = 1; i <= linkCount; i++) {
+     markdown += `Paragraph ${i} with [link ${i}](https://example.com/link${i} "Link ${i}")`;
+
+     if (i % 10 === 0) {
+       markdown += ` and ![Image ${i / 10}](https://example.com/image${i / 10}.jpg)`;
+     }
+
+     if (i % 50 === 0) {
+       markdown += ` and [Video ${i / 50}](https://example.com/video${i / 50}.mp4 "Video ${i / 50}")`;
+     }
+
+     markdown += '\n\n';
+   }
+
+   return markdown;
+ }
package/src/tools/search/content.ts
@@ -0,0 +1,147 @@
+ import * as cheerio from 'cheerio';
+ import type { References, MediaReference } from './types';
+
+ export function processContent(
+   html: string,
+   markdown: string
+ ): {
+   markdown: string;
+ } & References {
+   const linkMap = new Map<string, MediaReference>();
+   const imageMap = new Map<string, MediaReference>();
+   const videoMap = new Map<string, MediaReference>();
+   const iframeMap = new Map<string, MediaReference>();
+
+   const $ = cheerio.load(html, {
+     xmlMode: false,
+   });
+
+   // Extract all media references
+   $('a[href]').each((_, el) => {
+     const href = $(el).attr('href');
+     if (href != null && href) {
+       linkMap.set(href, {
+         originalUrl: href,
+         title: $(el).attr('title'),
+         text: $(el).text().trim(),
+       });
+     }
+   });
+
+   $('img[src]').each((_, el) => {
+     const src = $(el).attr('src');
+     if (src != null && src) {
+       imageMap.set(src, {
+         originalUrl: src,
+         title: $(el).attr('alt') ?? $(el).attr('title'),
+       });
+     }
+   });
+
+   // Handle videos (dedicated video elements and video platforms in iframes)
+   $('video[src], iframe[src*="youtube"], iframe[src*="vimeo"]').each(
+     (_, el) => {
+       const src = $(el).attr('src');
+       if (src != null && src) {
+         videoMap.set(src, {
+           originalUrl: src,
+           title: $(el).attr('title'),
+         });
+       }
+     }
+   );
+
+   // Handle all other generic iframes that aren't already captured as videos
+   $('iframe').each((_, el) => {
+     const src = $(el).attr('src');
+     if (
+       src != null &&
+       src &&
+       !src.includes('youtube') &&
+       !src.includes('vimeo')
+     ) {
+       iframeMap.set(src, {
+         originalUrl: src,
+         title: $(el).attr('title'),
+       });
+     }
+   });
+
+   // Create lookup maps with indices
+   const linkIndexMap = new Map<string, number>();
+   const imageIndexMap = new Map<string, number>();
+   const videoIndexMap = new Map<string, number>();
+   const iframeIndexMap = new Map<string, number>();
+
+   Array.from(linkMap.keys()).forEach((url, i) => linkIndexMap.set(url, i + 1));
+   Array.from(imageMap.keys()).forEach((url, i) =>
+     imageIndexMap.set(url, i + 1)
+   );
+   Array.from(videoMap.keys()).forEach((url, i) =>
+     videoIndexMap.set(url, i + 1)
+   );
+   Array.from(iframeMap.keys()).forEach((url, i) =>
+     iframeIndexMap.set(url, i + 1)
+   );
+
+   // Process the markdown
+   let result = markdown;
+
+   // Replace each URL one by one, starting with the longest URLs first to avoid partial matches
+   const allUrls = [
+     ...Array.from(imageMap.keys()).map((url) => ({
+       url,
+       type: 'image',
+       idx: imageIndexMap.get(url),
+     })),
+     ...Array.from(videoMap.keys()).map((url) => ({
+       url,
+       type: 'video',
+       idx: videoIndexMap.get(url),
+     })),
+     ...Array.from(iframeMap.keys()).map((url) => ({
+       url,
+       type: 'iframe',
+       idx: iframeIndexMap.get(url),
+     })),
+     ...Array.from(linkMap.keys()).map((url) => ({
+       url,
+       type: 'link',
+       idx: linkIndexMap.get(url),
+     })),
+   ].sort((a, b) => b.url.length - a.url.length);
+
+   // Create a function to escape special characters in URLs for regex
+   function escapeRegex(string: string): string {
+     return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+   }
+
+   // Replace each URL in the markdown
+   for (const { url, type, idx } of allUrls) {
+     // Create a regex that captures URLs in markdown links
+     const regex = new RegExp(`\\(${escapeRegex(url)}(?:\\s+"[^"]*")?\\)`, 'g');
+
+     result = result.replace(regex, (match) => {
+       // Keep any title attribute that might exist
+       const titleMatch = match.match(/\s+"([^"]*)"/);
+       const titlePart = titleMatch ? ` "${titleMatch[1]}"` : '';
+
+       return `(${type}#${idx}${titlePart})`;
+     });
+   }
+
+   iframeMap.clear();
+   const links = Array.from(linkMap.values());
+   linkMap.clear();
+   const images = Array.from(imageMap.values());
+   imageMap.clear();
+   const videos = Array.from(videoMap.values());
+   videoMap.clear();
+
+   return {
+     markdown: result,
+     links,
+     images,
+     videos,
+   };
+ }
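
The new processContent helper above rewrites every markdown link, image, and video URL into a compact type#index reference and returns the extracted references alongside the rewritten markdown, so the original URLs stay recoverable from the returned arrays. A minimal usage sketch in TypeScript, inferred from this file and the test file above; the input strings are invented for illustration:

import { processContent } from './content';

// Both renderings of the same page: HTML (used for reference extraction)
// and markdown (the text that gets rewritten). Illustrative values only.
const html =
  '<p>See <a href="https://example.com/docs" title="Docs">the docs</a></p>';
const markdown = 'See [the docs](https://example.com/docs "Docs")';

const { markdown: compact, links, images, videos } = processContent(html, markdown);

console.log(compact);
// => 'See [the docs](link#1 "Docs")'
console.log(links);
// => [{ originalUrl: 'https://example.com/docs', title: 'Docs', text: 'the docs' }]
console.log(images.length, videos.length);
// => 0 0

Note the longest-URL-first replacement order in the implementation: it prevents a URL that is a prefix of a longer one from clobbering the longer match.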
package/src/tools/search/firecrawl.ts
@@ -1,136 +1,7 @@
  /* eslint-disable no-console */
  import axios from 'axios';
-
- export interface FirecrawlScrapeOptions {
-   formats?: string[];
-   includeTags?: string[];
-   excludeTags?: string[];
-   headers?: Record<string, string>;
-   waitFor?: number;
-   timeout?: number;
- }
-
- interface ScrapeMetadata {
-   // Core source information
-   sourceURL?: string;
-   url?: string;
-   scrapeId?: string;
-   statusCode?: number;
-   // Basic metadata
-   title?: string;
-   description?: string;
-   language?: string;
-   favicon?: string;
-   viewport?: string;
-   robots?: string;
-   'theme-color'?: string;
-   // Open Graph metadata
-   'og:url'?: string;
-   'og:title'?: string;
-   'og:description'?: string;
-   'og:type'?: string;
-   'og:image'?: string;
-   'og:image:width'?: string;
-   'og:image:height'?: string;
-   'og:site_name'?: string;
-   ogUrl?: string;
-   ogTitle?: string;
-   ogDescription?: string;
-   ogImage?: string;
-   ogSiteName?: string;
-   // Article metadata
-   'article:author'?: string;
-   'article:published_time'?: string;
-   'article:modified_time'?: string;
-   'article:section'?: string;
-   'article:tag'?: string;
-   'article:publisher'?: string;
-   publishedTime?: string;
-   modifiedTime?: string;
-   // Twitter metadata
-   'twitter:site'?: string;
-   'twitter:creator'?: string;
-   'twitter:card'?: string;
-   'twitter:image'?: string;
-   'twitter:dnt'?: string;
-   'twitter:app:name:iphone'?: string;
-   'twitter:app:id:iphone'?: string;
-   'twitter:app:url:iphone'?: string;
-   'twitter:app:name:ipad'?: string;
-   'twitter:app:id:ipad'?: string;
-   'twitter:app:url:ipad'?: string;
-   'twitter:app:name:googleplay'?: string;
-   'twitter:app:id:googleplay'?: string;
-   'twitter:app:url:googleplay'?: string;
-   // Facebook metadata
-   'fb:app_id'?: string;
-   // App links
-   'al:ios:url'?: string;
-   'al:ios:app_name'?: string;
-   'al:ios:app_store_id'?: string;
-   // Allow for additional properties that might be present
-   [key: string]: string | number | boolean | null | undefined;
- }
-
- export interface FirecrawlScrapeResponse {
-   success: boolean;
-   data?: {
-     markdown?: string;
-     html?: string;
-     rawHtml?: string;
-     screenshot?: string;
-     links?: string[];
-     metadata?: ScrapeMetadata;
-   };
-   error?: string;
- }
-
- export interface FirecrawlScraperConfig {
-   apiKey?: string;
-   apiUrl?: string;
-   formats?: string[];
-   timeout?: number;
- }
- const getDomainName = (
-   link: string,
-   metadata?: ScrapeMetadata
- ): string | undefined => {
-   try {
-     const url = metadata?.sourceURL ?? metadata?.url ?? (link || '');
-     const domain = new URL(url).hostname.replace(/^www\./, '');
-     if (domain) {
-       return domain;
-     }
-   } catch (e) {
-     // URL parsing failed
-     console.error('Error parsing URL:', e);
-   }
-
-   return;
- };
-
- export function getAttribution(
-   link: string,
-   metadata?: ScrapeMetadata
- ): string | undefined {
-   if (!metadata) return getDomainName(link, metadata);
-
-   const possibleAttributions = [
-     metadata.ogSiteName,
-     metadata['og:site_name'],
-     metadata.title?.split('|').pop()?.trim(),
-     metadata['twitter:site']?.replace(/^@/, ''),
-   ];
-
-   const attribution = possibleAttributions.find(
-     (attr) => attr != null && typeof attr === 'string' && attr.trim() !== ''
-   );
-   if (attribution != null) {
-     return attribution;
-   }
-
-   return getDomainName(link, metadata);
- }
+ import { processContent } from './content';
+ import type * as t from './types';
 
  /**
   * Firecrawl scraper implementation
@@ -142,7 +13,7 @@ export class FirecrawlScraper {
    private defaultFormats: string[];
    private timeout: number;

-   constructor(config: FirecrawlScraperConfig = {}) {
+   constructor(config: t.FirecrawlScraperConfig = {}) {
      this.apiKey = config.apiKey ?? process.env.FIRECRAWL_API_KEY ?? '';

      const baseUrl =
@@ -169,8 +40,8 @@ export class FirecrawlScraper {
     */
    async scrapeUrl(
      url: string,
-     options: FirecrawlScrapeOptions = {}
-   ): Promise<[string, FirecrawlScrapeResponse]> {
+     options: t.FirecrawlScrapeOptions = {}
+   ): Promise<[string, t.FirecrawlScrapeResponse]> {
      if (!this.apiKey) {
        return [
          url,
@@ -221,27 +92,39 @@ export class FirecrawlScraper {
     * @param response Scrape response
     * @returns Extracted content or empty string if not available
     */
-   extractContent(response: FirecrawlScrapeResponse): string {
+   extractContent(
+     response: t.FirecrawlScrapeResponse
+   ): [string, undefined | t.References] {
      if (!response.success || !response.data) {
-       return '';
+       return ['', undefined];
      }

-     // Prefer markdown content if available
-     if (response.data.markdown != null) {
-       return response.data.markdown;
+     if (response.data.markdown != null && response.data.html != null) {
+       try {
+         const { markdown, ...rest } = processContent(
+           response.data.html,
+           response.data.markdown
+         );
+         return [markdown, rest];
+       } catch (error) {
+         console.error('Error processing content:', error);
+         return [response.data.markdown, undefined];
+       }
+     } else if (response.data.markdown != null) {
+       return [response.data.markdown, undefined];
      }

      // Fall back to HTML content
      if (response.data.html != null) {
-       return response.data.html;
+       return [response.data.html, undefined];
      }

      // Fall back to raw HTML content
      if (response.data.rawHtml != null) {
-       return response.data.rawHtml;
+       return [response.data.rawHtml, undefined];
      }

-     return '';
+     return ['', undefined];
    }

    /**
@@ -249,7 +132,7 @@ export class FirecrawlScraper {
     * @param response Scrape response
     * @returns Metadata object
     */
-   extractMetadata(response: FirecrawlScrapeResponse): ScrapeMetadata {
+   extractMetadata(response: t.FirecrawlScrapeResponse): t.ScrapeMetadata {
      if (!response.success || !response.data || !response.data.metadata) {
        return {};
      }
@@ -264,7 +147,7 @@
   * @returns Firecrawl scraper instance
   */
  export const createFirecrawlScraper = (
-   config: FirecrawlScraperConfig = {}
+   config: t.FirecrawlScraperConfig = {}
  ): FirecrawlScraper => {
    return new FirecrawlScraper(config);
  };
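
With these changes, extractContent no longer returns a bare string: it returns a [content, references] tuple, delegating reference extraction to processContent when both markdown and HTML are present, and falling back to plain markdown, HTML, or raw HTML otherwise. A sketch of an end-to-end consumer against the new signatures; the URL and config values are placeholders, not part of this diff:

import { createFirecrawlScraper } from './firecrawl';

async function main(): Promise<void> {
  // apiKey falls back to process.env.FIRECRAWL_API_KEY when omitted.
  const scraper = createFirecrawlScraper({ timeout: 15000 });

  const [url, response] = await scraper.scrapeUrl('https://example.com');
  if (!response.success) {
    console.error(`Scrape failed for ${url}:`, response.error);
    return;
  }

  // New tuple shape: the (possibly reference-rewritten) markdown plus the
  // extracted link/image/video references, or undefined when reference
  // extraction was not possible.
  const [content, references] = scraper.extractContent(response);
  const metadata = scraper.extractMetadata(response);

  console.log(metadata.title, '-', `${content.length} chars`);
  if (references) {
    console.log(
      `${references.links.length} links, ${references.images.length} images, ${references.videos.length} videos`
    );
  }
}

main().catch(console.error);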