firecrawl 1.29.3 → 3.0.3

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.env.example +4 -2
  2. package/README.md +85 -78
  3. package/audit-ci.jsonc +4 -0
  4. package/dist/chunk-OIZ6OKY4.js +85 -0
  5. package/dist/index.cjs +961 -35
  6. package/dist/index.d.cts +524 -11
  7. package/dist/index.d.ts +524 -11
  8. package/dist/index.js +953 -27
  9. package/dist/package-V5IPFKBE.js +4 -0
  10. package/package.json +6 -6
  11. package/src/__tests__/e2e/v2/batch.test.ts +74 -0
  12. package/src/__tests__/e2e/v2/crawl.test.ts +182 -0
  13. package/src/__tests__/e2e/v2/extract.test.ts +70 -0
  14. package/src/__tests__/e2e/v2/map.test.ts +55 -0
  15. package/src/__tests__/e2e/v2/scrape.test.ts +130 -0
  16. package/src/__tests__/e2e/v2/search.test.ts +247 -0
  17. package/src/__tests__/e2e/v2/usage.test.ts +36 -0
  18. package/src/__tests__/e2e/v2/utils/idmux.ts +58 -0
  19. package/src/__tests__/e2e/v2/watcher.test.ts +96 -0
  20. package/src/__tests__/unit/v2/errorHandler.test.ts +19 -0
  21. package/src/__tests__/unit/v2/scrape.unit.test.ts +11 -0
  22. package/src/__tests__/unit/v2/validation.test.ts +59 -0
  23. package/src/index.backup.ts +2146 -0
  24. package/src/index.ts +27 -2134
  25. package/src/v1/index.ts +2158 -0
  26. package/src/v2/client.ts +283 -0
  27. package/src/v2/methods/batch.ts +119 -0
  28. package/src/v2/methods/crawl.ts +144 -0
  29. package/src/v2/methods/extract.ts +86 -0
  30. package/src/v2/methods/map.ts +37 -0
  31. package/src/v2/methods/scrape.ts +26 -0
  32. package/src/v2/methods/search.ts +69 -0
  33. package/src/v2/methods/usage.ts +39 -0
  34. package/src/v2/types.ts +337 -0
  35. package/src/v2/utils/errorHandler.ts +18 -0
  36. package/src/v2/utils/getVersion.ts +14 -0
  37. package/src/v2/utils/httpClient.ts +99 -0
  38. package/src/v2/utils/validation.ts +50 -0
  39. package/src/v2/watcher.ts +159 -0
  40. package/tsconfig.json +2 -1
  41. package/dist/package-Z6F7JDXI.js +0 -111
  42. /package/src/__tests__/{v1/e2e_withAuth → e2e/v1}/index.test.ts +0 -0
  43. /package/src/__tests__/{v1/unit → unit/v1}/monitor-job-status-retry.test.ts +0 -0
package/dist/index.d.ts CHANGED
@@ -1,7 +1,509 @@
- import { AxiosRequestHeaders, AxiosResponse } from 'axios';
  import * as zt from 'zod';
+ import { ZodTypeAny } from 'zod';
+ import { AxiosResponse, AxiosRequestHeaders } from 'axios';
+ import { EventEmitter } from 'events';
  import { TypedEventTarget } from 'typescript-event-target';

+ type FormatString = "markdown" | "html" | "rawHtml" | "links" | "screenshot" | "summary" | "changeTracking" | "json";
+ interface Viewport {
+     width: number;
+     height: number;
+ }
+ interface Format {
+     type: FormatString;
+ }
+ interface JsonFormat extends Format {
+     type: "json";
+     prompt?: string;
+     schema?: Record<string, unknown> | ZodTypeAny;
+ }
+ interface ScreenshotFormat {
+     type: "screenshot";
+     fullPage?: boolean;
+     quality?: number;
+     viewport?: Viewport | {
+         width: number;
+         height: number;
+     };
+ }
+ interface ChangeTrackingFormat extends Format {
+     type: "changeTracking";
+     modes: ("git-diff" | "json")[];
+     schema?: Record<string, unknown>;
+     prompt?: string;
+     tag?: string;
+ }
+ type FormatOption = FormatString | Format | JsonFormat | ChangeTrackingFormat | ScreenshotFormat;
+ interface LocationConfig {
+     country?: string;
+     languages?: string[];
+ }
+ interface WaitAction {
+     type: "wait";
+     milliseconds?: number;
+     selector?: string;
+ }
+ interface ScreenshotAction {
+     type: "screenshot";
+     fullPage?: boolean;
+     quality?: number;
+     viewport?: Viewport | {
+         width: number;
+         height: number;
+     };
+ }
+ interface ClickAction {
+     type: "click";
+     selector: string;
+ }
+ interface WriteAction {
+     type: "write";
+     text: string;
+ }
+ interface PressAction {
+     type: "press";
+     key: string;
+ }
+ interface ScrollAction {
+     type: "scroll";
+     direction: "up" | "down";
+     selector?: string;
+ }
+ interface ScrapeAction {
+     type: "scrape";
+ }
+ interface ExecuteJavascriptAction {
+     type: "executeJavascript";
+     script: string;
+ }
+ interface PDFAction {
+     type: "pdf";
+     format?: "A0" | "A1" | "A2" | "A3" | "A4" | "A5" | "A6" | "Letter" | "Legal" | "Tabloid" | "Ledger";
+     landscape?: boolean;
+     scale?: number;
+ }
+ type ActionOption = WaitAction | ScreenshotAction | ClickAction | WriteAction | PressAction | ScrollAction | ScrapeAction | ExecuteJavascriptAction | PDFAction;
+ interface ScrapeOptions {
+     formats?: FormatOption[];
+     headers?: Record<string, string>;
+     includeTags?: string[];
+     excludeTags?: string[];
+     onlyMainContent?: boolean;
+     timeout?: number;
+     waitFor?: number;
+     mobile?: boolean;
+     parsers?: string[];
+     actions?: ActionOption[];
+     location?: LocationConfig;
+     skipTlsVerification?: boolean;
+     removeBase64Images?: boolean;
+     fastMode?: boolean;
+     useMock?: string;
+     blockAds?: boolean;
+     proxy?: "basic" | "stealth" | "auto" | string;
+     maxAge?: number;
+     storeInCache?: boolean;
+ }
+ interface WebhookConfig {
+     url: string;
+     headers?: Record<string, string>;
+     metadata?: Record<string, string>;
+     events?: Array<"completed" | "failed" | "page" | "started">;
+ }
+ interface DocumentMetadata {
+     title?: string;
+     description?: string;
+     language?: string;
+     keywords?: string | string[];
+     robots?: string;
+     ogTitle?: string;
+     ogDescription?: string;
+     ogUrl?: string;
+     ogImage?: string;
+     sourceURL?: string;
+     statusCode?: number;
+     error?: string;
+     [key: string]: unknown;
+ }
+ interface Document {
+     markdown?: string;
+     html?: string;
+     rawHtml?: string;
+     json?: unknown;
+     summary?: string;
+     metadata?: DocumentMetadata;
+     links?: string[];
+     screenshot?: string;
+     actions?: Record<string, unknown>;
+     warning?: string;
+     changeTracking?: Record<string, unknown>;
+ }
+ interface SearchResult {
+     url: string;
+     title?: string;
+     description?: string;
+ }
+ interface SearchData {
+     web?: Array<SearchResult | Document>;
+     news?: Array<SearchResult | Document>;
+     images?: Array<SearchResult | Document>;
+ }
+ interface SearchRequest {
+     query: string;
+     sources?: Array<"web" | "news" | "images" | {
+         type: "web" | "news" | "images";
+     }>;
+     limit?: number;
+     tbs?: string;
+     location?: string;
+     ignoreInvalidURLs?: boolean;
+     timeout?: number;
+     scrapeOptions?: ScrapeOptions;
+ }
+ interface CrawlOptions {
+     prompt?: string | null;
+     excludePaths?: string[] | null;
+     includePaths?: string[] | null;
+     maxDiscoveryDepth?: number | null;
+     sitemap?: "skip" | "include";
+     ignoreQueryParameters?: boolean;
+     limit?: number | null;
+     crawlEntireDomain?: boolean;
+     allowExternalLinks?: boolean;
+     allowSubdomains?: boolean;
+     delay?: number | null;
+     maxConcurrency?: number | null;
+     webhook?: string | WebhookConfig | null;
+     scrapeOptions?: ScrapeOptions | null;
+     zeroDataRetention?: boolean;
+ }
+ interface CrawlResponse$1 {
+     id: string;
+     url: string;
+ }
+ interface CrawlJob {
+     status: "scraping" | "completed" | "failed" | "cancelled";
+     total: number;
+     completed: number;
+     creditsUsed?: number;
+     expiresAt?: string;
+     next?: string | null;
+     data: Document[];
+ }
+ interface BatchScrapeOptions {
+     options?: ScrapeOptions;
+     webhook?: string | WebhookConfig;
+     appendToId?: string;
+     ignoreInvalidURLs?: boolean;
+     maxConcurrency?: number;
+     zeroDataRetention?: boolean;
+     integration?: string;
+     idempotencyKey?: string;
+ }
+ interface BatchScrapeResponse$1 {
+     id: string;
+     url: string;
+     invalidURLs?: string[];
+ }
+ interface BatchScrapeJob {
+     status: "scraping" | "completed" | "failed" | "cancelled";
+     completed: number;
+     total: number;
+     creditsUsed?: number;
+     expiresAt?: string;
+     next?: string | null;
+     data: Document[];
+ }
+ interface MapData {
+     links: SearchResult[];
+ }
+ interface MapOptions {
+     search?: string;
+     sitemap?: "only" | "include" | "skip";
+     includeSubdomains?: boolean;
+     limit?: number;
+     timeout?: number;
+ }
+ interface ExtractResponse$1 {
+     success?: boolean;
+     id?: string;
+     status?: "processing" | "completed" | "failed" | "cancelled";
+     data?: unknown;
+     error?: string;
+     warning?: string;
+     sources?: Record<string, unknown>;
+     expiresAt?: string;
+ }
+ interface ConcurrencyCheck {
+     concurrency: number;
+     maxConcurrency: number;
+ }
+ interface CreditUsage {
+     remainingCredits: number;
+ }
+ interface TokenUsage {
+     remainingTokens: number;
+ }
+ interface CrawlErrorsResponse$1 {
+     errors: {
+         id: string;
+         timestamp?: string;
+         url: string;
+         code?: string;
+         error: string;
+     }[];
+     robotsBlocked: string[];
+ }
+ interface ActiveCrawl {
+     id: string;
+     teamId: string;
+     url: string;
+     options?: Record<string, unknown> | null;
+ }
+ interface ActiveCrawlsResponse {
+     success: boolean;
+     crawls: ActiveCrawl[];
+ }
+ interface ErrorDetails {
+     code?: string;
+     message: string;
+     details?: Record<string, unknown>;
+     status?: number;
+ }
+ declare class SdkError extends Error {
+     status?: number;
+     code?: string;
+     details?: unknown;
+     constructor(message: string, status?: number, code?: string, details?: unknown);
+ }
+
+ interface HttpClientOptions {
+     apiKey: string;
+     apiUrl: string;
+     timeoutMs?: number;
+     maxRetries?: number;
+     backoffFactor?: number;
+ }
+ declare class HttpClient {
+     private instance;
+     private readonly apiKey;
+     private readonly apiUrl;
+     private readonly maxRetries;
+     private readonly backoffFactor;
+     constructor(options: HttpClientOptions);
+     getApiUrl(): string;
+     getApiKey(): string;
+     private request;
+     private sleep;
+     post<T = any>(endpoint: string, body: Record<string, unknown>, headers?: Record<string, string>): Promise<AxiosResponse<T, any>>;
+     get<T = any>(endpoint: string, headers?: Record<string, string>): Promise<AxiosResponse<T, any>>;
+     delete<T = any>(endpoint: string, headers?: Record<string, string>): Promise<AxiosResponse<T, any>>;
+     prepareHeaders(idempotencyKey?: string): Record<string, string>;
+ }
+
+ declare function prepareExtractPayload(args: {
+     urls?: string[];
+     prompt?: string;
+     schema?: Record<string, unknown> | ZodTypeAny;
+     systemPrompt?: string;
+     allowExternalLinks?: boolean;
+     enableWebSearch?: boolean;
+     showSources?: boolean;
+     scrapeOptions?: ScrapeOptions;
+     ignoreInvalidURLs?: boolean;
+ }): Record<string, unknown>;
+ declare function startExtract(http: HttpClient, args: Parameters<typeof prepareExtractPayload>[0]): Promise<ExtractResponse$1>;
+
+ type JobKind = "crawl" | "batch";
+ interface WatcherOptions {
+     kind?: JobKind;
+     pollInterval?: number;
+     timeout?: number;
+ }
+ declare class Watcher extends EventEmitter {
+     private readonly http;
+     private readonly jobId;
+     private readonly kind;
+     private readonly pollInterval;
+     private readonly timeout?;
+     private ws?;
+     private closed;
+     constructor(http: HttpClient, jobId: string, opts?: WatcherOptions);
+     private buildWsUrl;
+     start(): Promise<void>;
+     private attachWsHandlers;
+     private emitDocuments;
+     private emitSnapshot;
+     private pollLoop;
+     close(): void;
+ }
+
+ type ExtractJsonSchemaFromFormats<Formats> = Formats extends readonly any[] ? Extract<Formats[number], {
+     type: "json";
+     schema?: unknown;
+ }>["schema"] : never;
+ type InferredJsonFromOptions<Opts> = Opts extends {
+     formats?: infer Fmts;
+ } ? ExtractJsonSchemaFromFormats<Fmts> extends zt.ZodTypeAny ? zt.infer<ExtractJsonSchemaFromFormats<Fmts>> : unknown : unknown;
+ /**
+  * Configuration for the v2 client transport.
+  */
+ interface FirecrawlClientOptions {
+     /** API key (falls back to FIRECRAWL_API_KEY). */
+     apiKey?: string | null;
+     /** API base URL (falls back to FIRECRAWL_API_URL or https://api.firecrawl.dev). */
+     apiUrl?: string | null;
+     /** Per-request timeout in milliseconds (optional). */
+     timeoutMs?: number;
+     /** Max automatic retries for transient failures (optional). */
+     maxRetries?: number;
+     /** Exponential backoff factor for retries (optional). */
+     backoffFactor?: number;
+ }
+ /**
+  * Firecrawl v2 client. Provides typed access to all v2 endpoints and utilities.
+  */
+ declare class FirecrawlClient {
+     private readonly http;
+     /**
+      * Create a v2 client.
+      * @param options Transport configuration (API key, base URL, timeouts, retries).
+      */
+     constructor(options?: FirecrawlClientOptions);
+     /**
+      * Scrape a single URL.
+      * @param url Target URL.
+      * @param options Optional scrape options (formats, headers, etc.).
+      * @returns Resolved document with requested formats.
+      */
+     scrape<Opts extends ScrapeOptions>(url: string, options: Opts): Promise<Omit<Document, "json"> & {
+         json?: InferredJsonFromOptions<Opts>;
+     }>;
+     scrape(url: string, options?: ScrapeOptions): Promise<Document>;
+     /**
+      * Search the web and optionally scrape each result.
+      * @param query Search query string.
+      * @param req Additional search options (sources, limit, scrapeOptions, etc.).
+      * @returns Structured search results.
+      */
+     search(query: string, req?: Omit<SearchRequest, "query">): Promise<SearchData>;
+     /**
+      * Map a site to discover URLs (sitemap-aware).
+      * @param url Root URL to map.
+      * @param options Mapping options (sitemap mode, includeSubdomains, limit, timeout).
+      * @returns Discovered links.
+      */
+     map(url: string, options?: MapOptions): Promise<MapData>;
+     /**
+      * Start a crawl job (async).
+      * @param url Root URL to crawl.
+      * @param req Crawl configuration (paths, limits, scrapeOptions, webhook, etc.).
+      * @returns Job id and url.
+      */
+     startCrawl(url: string, req?: CrawlOptions): Promise<CrawlResponse$1>;
+     /**
+      * Get the status and partial data of a crawl job.
+      * @param jobId Crawl job id.
+      */
+     getCrawlStatus(jobId: string): Promise<CrawlJob>;
+     /**
+      * Cancel a crawl job.
+      * @param jobId Crawl job id.
+      * @returns True if cancelled.
+      */
+     cancelCrawl(jobId: string): Promise<boolean>;
+     /**
+      * Convenience waiter: start a crawl and poll until it finishes.
+      * @param url Root URL to crawl.
+      * @param req Crawl configuration plus waiter controls (pollInterval, timeout seconds).
+      * @returns Final job snapshot.
+      */
+     crawl(url: string, req?: CrawlOptions & {
+         pollInterval?: number;
+         timeout?: number;
+     }): Promise<CrawlJob>;
+     /**
+      * Retrieve crawl errors and robots.txt blocks.
+      * @param crawlId Crawl job id.
+      */
+     getCrawlErrors(crawlId: string): Promise<CrawlErrorsResponse$1>;
+     /**
+      * List active crawls for the authenticated team.
+      */
+     getActiveCrawls(): Promise<ActiveCrawlsResponse>;
+     /**
+      * Preview normalized crawl parameters produced by a natural-language prompt.
+      * @param url Root URL.
+      * @param prompt Natural-language instruction.
+      */
+     crawlParamsPreview(url: string, prompt: string): Promise<Record<string, unknown>>;
+     /**
+      * Start a batch scrape job for multiple URLs (async).
+      * @param urls URLs to scrape.
+      * @param opts Batch options (scrape options, webhook, concurrency, idempotency key, etc.).
+      * @returns Job id and url.
+      */
+     startBatchScrape(urls: string[], opts?: BatchScrapeOptions): Promise<BatchScrapeResponse$1>;
+     /**
+      * Get the status and partial data of a batch scrape job.
+      * @param jobId Batch job id.
+      */
+     getBatchScrapeStatus(jobId: string): Promise<BatchScrapeJob>;
+     /**
+      * Retrieve batch scrape errors and robots.txt blocks.
+      * @param jobId Batch job id.
+      */
+     getBatchScrapeErrors(jobId: string): Promise<CrawlErrorsResponse$1>;
+     /**
+      * Cancel a batch scrape job.
+      * @param jobId Batch job id.
+      * @returns True if cancelled.
+      */
+     cancelBatchScrape(jobId: string): Promise<boolean>;
+     /**
+      * Convenience waiter: start a batch scrape and poll until it finishes.
+      * @param urls URLs to scrape.
+      * @param opts Batch options plus waiter controls (pollInterval, timeout seconds).
+      * @returns Final job snapshot.
+      */
+     batchScrape(urls: string[], opts?: BatchScrapeOptions & {
+         pollInterval?: number;
+         timeout?: number;
+     }): Promise<BatchScrapeJob>;
+     /**
+      * Start an extract job (async).
+      * @param args Extraction request (urls, schema or prompt, flags).
+      * @returns Job id or processing state.
+      */
+     startExtract(args: Parameters<typeof startExtract>[1]): Promise<ExtractResponse$1>;
+     /**
+      * Get extract job status/data.
+      * @param jobId Extract job id.
+      */
+     getExtractStatus(jobId: string): Promise<ExtractResponse$1>;
+     /**
+      * Convenience waiter: start an extract and poll until it finishes.
+      * @param args Extraction request plus waiter controls (pollInterval, timeout seconds).
+      * @returns Final extract response.
+      */
+     extract(args: Parameters<typeof startExtract>[1] & {
+         pollInterval?: number;
+         timeout?: number;
+     }): Promise<ExtractResponse$1>;
+     /** Current concurrency usage. */
+     getConcurrency(): Promise<ConcurrencyCheck>;
+     /** Current credit usage. */
+     getCreditUsage(): Promise<CreditUsage>;
+     /** Recent token usage. */
+     getTokenUsage(): Promise<TokenUsage>;
+     /**
+      * Create a watcher for a crawl or batch job. Emits: `document`, `snapshot`, `done`, `error`.
+      * @param jobId Job id.
+      * @param opts Watcher options (kind, pollInterval, timeout seconds).
+      */
+     watcher(jobId: string, opts?: WatcherOptions): Watcher;
+ }
+
  /**
   * Configuration interface for FirecrawlApp.
   * @param apiKey - Optional API key for authentication.
@@ -343,15 +845,6 @@ interface ErrorResponse {
      success: false;
      error: string;
  }
- /**
-  * Custom error class for Firecrawl.
-  * Extends the built-in Error class to include a status code.
-  */
- declare class FirecrawlError extends Error {
-     statusCode: number;
-     details?: any;
-     constructor(message: string, statusCode: number, details?: any);
- }
  /**
   * Parameters for search operations.
   * Defines options for searching and scraping search results.
@@ -388,6 +881,7 @@ interface CrawlErrorsResponse {
          id: string;
          timestamp?: string;
          url: string;
+         code?: string;
          error: string;
      }[];
  /**
@@ -813,4 +1307,23 @@ declare class CrawlWatcher extends TypedEventTarget<CrawlWatcherEvents> {
      close(): void;
  }

- export { type Action, type ActionsResult, type AgentOptions, type AgentOptionsExtract, type BatchScrapeResponse, type BatchScrapeStatusResponse, type CrawlErrorsResponse, type CrawlParams, type CrawlResponse, type CrawlScrapeOptions, type CrawlStatusResponse, CrawlWatcher, type DeepResearchParams, type DeepResearchResponse, type DeepResearchStatusResponse, type ErrorResponse, type ExtractParams, type ExtractResponse, type FirecrawlAppConfig, type FirecrawlDocument, type FirecrawlDocumentMetadata, FirecrawlError, type GenerateLLMsTextParams, type GenerateLLMsTextResponse, type GenerateLLMsTextStatusResponse, type MapParams, type MapResponse, type ScrapeParams, type ScrapeResponse, type SearchParams, type SearchResponse, FirecrawlApp as default };
+ /**
+  * Firecrawl JS/TS SDK — unified entrypoint.
+  * - v2 by default on the top‑level client
+  * - v1 available under `.v1` (feature‑frozen)
+  * - Exports: `Firecrawl` (default), `FirecrawlClient` (v2), `FirecrawlAppV1` (v1), and v2 types
+  */
+ /** Direct v2 client. */
+
+ /** Unified client: extends v2 and adds `.v1` for backward compatibility. */
+ declare class Firecrawl extends FirecrawlClient {
+     /** Feature‑frozen v1 client (lazy). */
+     private _v1?;
+     private _v1Opts;
+     /** @param opts API credentials and base URL. */
+     constructor(opts?: FirecrawlAppConfig);
+     /** Access the legacy v1 client (instantiated on first access). */
+     get v1(): FirecrawlApp;
+ }
+
+ export { type ActionOption, type ActiveCrawl, type ActiveCrawlsResponse, type BatchScrapeJob, type BatchScrapeOptions, type BatchScrapeResponse$1 as BatchScrapeResponse, type ChangeTrackingFormat, type ClickAction, type ConcurrencyCheck, type CrawlErrorsResponse$1 as CrawlErrorsResponse, type CrawlJob, type CrawlOptions, type CrawlResponse$1 as CrawlResponse, type CreditUsage, type Document, type DocumentMetadata, type ErrorDetails, type ExecuteJavascriptAction, type ExtractResponse$1 as ExtractResponse, Firecrawl, FirecrawlApp as FirecrawlAppV1, FirecrawlClient, type Format, type FormatOption, type FormatString, type JsonFormat, type LocationConfig, type MapData, type MapOptions, type PDFAction, type PressAction, type ScrapeAction, type ScrapeOptions, type ScreenshotAction, type ScreenshotFormat, type ScrollAction, SdkError, type SearchData, type SearchRequest, type SearchResult, type TokenUsage, type Viewport, type WaitAction, type WebhookConfig, type WriteAction, Firecrawl as default };
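
For orientation, a minimal usage sketch of the v2 surface declared above. Method names, option fields, and event names (`scrape`, `crawl`, `startCrawl`, `watcher`, `formats`, `pollInterval`, `document`/`snapshot`/`done`/`error`) are taken from these declarations; the target URL, key handling, and concrete option values are illustrative assumptions only.

// Sketch only: based on the v2 type declarations in this diff; URLs and values are placeholders.
import Firecrawl from "firecrawl";

const firecrawl = new Firecrawl({ apiKey: process.env.FIRECRAWL_API_KEY });

async function main() {
  // Single-page scrape: `formats` accepts plain format strings or typed format objects.
  const doc = await firecrawl.scrape("https://example.com", {
    formats: ["markdown", { type: "screenshot", fullPage: true }],
    onlyMainContent: true,
  });
  console.log(doc.metadata?.title, doc.markdown?.length);

  // Waiter variant: starts a crawl and polls until the job reaches a terminal status.
  const job = await firecrawl.crawl("https://example.com", {
    limit: 10,
    pollInterval: 2,
    timeout: 120,
  });
  console.log(job.status, `${job.completed}/${job.total} pages`);

  // Async variant plus watcher: startCrawl returns a job id; the watcher emits
  // `document`, `snapshot`, `done`, and `error` events while the job runs.
  const started = await firecrawl.startCrawl("https://example.com", { limit: 5 });
  const watcher = firecrawl.watcher(started.id, { kind: "crawl", pollInterval: 2 });
  watcher.on("document", (d) => console.log("scraped", d?.metadata?.sourceURL));
  watcher.on("done", (snapshot) => console.log("finished with", snapshot?.data?.length, "documents"));
  await watcher.start();
}

main().catch(console.error);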