@crawlgate/sdk 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -316,20 +316,21 @@ function buildScrapeBody(url, options) {
 async function scrape(http, url, options) {
   const body = buildScrapeBody(url, options);
   const response = await http.post("/v1/scrape", body);
-  if (!response.data.success) {
+  const data = response.data;
+  if (data.success === false) {
     throw new CrawlGateError(
-      response.data.error || "Scrape failed",
+      data.error || "Scrape failed",
       void 0,
-      "SCRAPE_ERROR"
+      data.code || "SCRAPE_ERROR"
     );
   }
-  if (!response.data.data) {
-    throw new CrawlGateError("No data returned from scrape", void 0, "NO_DATA");
+  if (data.success === true && data.data) {
+    return data.data;
   }
-  const document = {
-    ...response.data.data
-  };
-  return document;
+  if (data.url || data.markdown || data.html) {
+    return data;
+  }
+  throw new CrawlGateError("No data returned from scrape", void 0, "NO_DATA");
 }
 
 // src/methods/crawl.ts
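In practical terms, this hunk changes `scrape()` in three ways: it now treats only an explicit `success: false` as a failure, it propagates a server-supplied error code (`data.code`) into the thrown `CrawlGateError` instead of the fixed `"SCRAPE_ERROR"`, and it accepts a bare document payload (top-level `url`, `markdown`, or `html`) in addition to the usual `{ success: true, data }` envelope, presumably to tolerate `/v1/scrape` responses that omit the envelope. A minimal consumer-side sketch of the new behavior, using the package's public `CrawlGateClient` API (the URL and logging are illustrative):

```typescript
import { CrawlGateClient, CrawlGateError } from "@crawlgate/sdk";

async function main(): Promise<void> {
  // With no explicit options, the client falls back to the
  // CRAWLGATE_API_KEY / CRAWLGATE_API_URL environment variables.
  const client = new CrawlGateClient();

  try {
    // Resolves whether the API answers with { success: true, data: {...} }
    // or with a bare document ({ url, markdown, html, ... }).
    const doc = await client.scrape("https://example.com", {
      formats: ["markdown"],
    });
    console.log(doc.markdown);
  } catch (err) {
    if (err instanceof CrawlGateError) {
      // In 1.0.0, err.code was always "SCRAPE_ERROR" here; in 1.0.1 it
      // carries the server-reported code when the error envelope has one.
      console.error(err.code, err.message);
    } else {
      throw err;
    }
  }
}

main().catch(console.error);
```

Note that the envelope check runs first, so a well-formed `{ success: true, data }` response is handled exactly as before; the bare-document branch only applies as a fallback before the `NO_DATA` error.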
package/dist/index.cjs.map CHANGED
@@ -1 +1 @@
-{"version":3,"sources":["../src/index.ts","../src/utils/httpClient.ts","../src/errors.ts","../src/methods/scrape.ts","../src/methods/crawl.ts","../src/methods/map.ts","../src/methods/search.ts","../src/methods/batch.ts","../src/methods/extract.ts","../src/methods/usage.ts","../src/client.ts"],"sourcesContent":[…],"mappings":"…"}
I,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;;;AC9MA,SAAS,aACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,IACpB,MACA,KACA,SACsB;AACtB,QAAM,OAAO,aAAa,KAAK,OAAO;AAEtC,QAAM,WAAW,MAAM,KAAK,KAAkB,WAAW,IAAI;AAE7D,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,OAAO,SAAS,KAAK,SAAS,CAAC;AAAA,IAC/B,OAAO,SAAS,KAAK,SAAS,SAAS,KAAK,OAAO,UAAU;AAAA,IAC7D,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;;;AC1DA,IAAAC,6BAAgC;AAQhC,SAASC,eACP,QACyB;AACzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,eAAO,4CAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,OACA,SACyB;AACzB,QAAM,OAAgC,EAAE,MAAM;AAE9C,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,MAAM;AACjB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,eAAe;AAC1B,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQA,eAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,OACA,SACyB;AACzB,QAAM,OAAO,gBAAgB,OAAO,OAAO;AAE3C,QAAM,WAAW,MAAM,KAAK,KAAqB,cAAc,IAAI;AAEnE,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,cAAc,SAAS,KAAK;AAAA,IAC5B,YAAY,SAAS,KAAK;AAAA,IAC1B,SAAS,SAAS,KAAK;AAAA,EACzB;AACF;;;AC9FA,SAAS,eACP,MACA,SACyB;AACzB,QAAM,OAAgC,EAAE,KAAK;AAE7C,MAAI,SAAS,SAAS;AAEpB,UAAM,aAAa,QAAQ;AAC3B,QAAI,WAAW,OAAQ,MAAK,SAAS,WAAW;AAChD,QAAI,WAAW,QAAS,MAAK,UAAU,WAAW;AAClD,QAAI,WAAW,oBAAoB,OAAW,MAAK,kBAAkB,WAAW;AAChF,QAAI,WAAW,YAAa,MAAK,cAAc,WAAW;AAC1D,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,MAAO,MAAK,QAAQ,WAAW;AAAA,EAChD;AAEA,MAAI,SAAS,WAAW,MAAM;AAC5B,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,cAAc,MAAM;AAC/B,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,SAAS,qBAAqB,MAAM;AACtC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,SAAS,kBAAkB,MAAM;AACnC,SAAK,iBAAiB,QAAQ;AAAA,EAChC;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,iBACpB,MACA,MACA,SAC8B;AAC9B,MAAI,CAAC,MAAM,QAAQ,IAAI,KAAK,KAAK,WAAW,GAAG;AAC7C,UAAM,IAAI,eAAe,8BAA8B,KAAK,kBAAkB;AAAA,EAChF;AAEA,QAAM,OAAO,eAAe,MAAM,OAAO;AAEzC,QAAM,UAAkC,CAAC;AACzC,MAAI,SAAS,gBAAgB;AAC3B,YAAQ,iBAAiB,IAAI,QAAQ;AAAA,EACvC;AAEA,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B;AAAA,IACA;AAAA,IACA,OAAO,KAAK,OAAO,EAAE,SAAS,IAAI,UAAU;AAAA,EAC9C;AAEA,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,KAAK,SAAS,KAAK;AAAA,IACnB,aAAa,SAAS,KAAK;AAAA,EAC7B;AACF;AASA,eAAsB,qBACpB,MACA,OACyB;AACzB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,IACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,K
AAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA,IACtC,aAAa,SAAS,KAAK;AAAA,IAC3B,WAAW,SAAS,KAAK;AAAA,IACzB,MAAM,SAAS,KAAK,QAAQ;AAAA,IAC5B,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,kBACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO,SAAS,KAAK,WAAW,eAAe,SAAS,KAAK,YAAY;AAC3E;AASA,eAAsB,qBACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,oBAAoB,KAAK,SAAS;AAErC,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,SAAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;AAKA,SAASC,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,uBACb,MACA,OACA,eAAuB,KACvB,SACyB;AACzB,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,qBAAqB,MAAM,KAAK;AAGrD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AAUA,eAAsB,YACpB,MACA,MACA,SACyB;AACzB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS;AAGzB,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,iBAAiB,MAAM,MAAM,OAAO;AAGhE,SAAO,uBAAuB,MAAM,OAAO,cAAc,OAAO;AAClE;;;ACjQA,IAAAC,6BAAgC;AAQhC,SAAS,YAAY,OAAqC;AACxD,SACE,UAAU,QACV,OAAO,UAAU,YACjB,UAAU,UACT,OAAQ,MAAc,cAAc,cAAc,OAAQ,MAAc,UAAU;AAEvF;AAKA,SAASC,eACP,QACyB;AACzB,MAAI,YAAY,MAAM,GAAG;AACvB,eAAO,4CAAgB,MAAM;AAAA,EAC/B;AACA,SAAO;AACT;AAKA,SAAS,iBACP,SACyB;AACzB,QAAM,OAAgC,CAAC;AAEvC,MAAI,QAAQ,MAAM;AAChB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAASA,eAAc,QAAQ,MAAM;AAAA,EAC5C;AAEA,MAAI,QAAQ,gBAAgB,MAAM;AAChC,SAAK,eAAe,QAAQ;AAAA,EAC9B;AAEA,MAAI,QAAQ,sBAAsB,MAAM;AACtC,SAAK,qBAAqB,QAAQ;AAAA,EACpC;AAEA,MAAI,QAAQ,mBAAmB,MAAM;AACnC,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,QAAQ,eAAe,MAAM;AAC/B,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,QAAQ,qBAAqB,MAAM;AACrC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,QAAQ,UAAU;AACpB,SAAK,WAAW,QAAQ;AAAA,EAC1B;AAEA,MAAI,QAAQ,WAAW;AACrB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,QAAQ,eAAe;AACzB,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,SAAO;AACT;AASA,eAAsB,aACpB,MACA,SAC0B;AAC1B,QAAM,OAAO,iBAAiB,OAAO;AAErC,QAAM,WAAW,MAAM,KAAK,KAAsB,eAAe,IAAI;AAErE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AASA,eAAsB,iBACpB,MACA,OAC0B;AAC1B,QAAM,WAAW,MAAM,KAAK,IAAqB,eAAe,KAAK,EAAE;AAEvE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AAKA,SAASC,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,yBACb,MACA,OACA,eAAuB,KACvB,SAC0B;AAC1B,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,iBAAiB,MAAM,KAAK;AAGjD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AA8BA,eAAsB,QACpB,MACA,SAC0B;AAC1B,QAAM,eAAe,QAAQ,gBAAg
B;AAC7C,QAAM,UAAU,QAAQ;AAGxB,QAAM,UAAU,MAAM,aAAa,MAAM,OAAO;AAGhD,MAAI,CAAC,QAAQ,IAAI;AACf,WAAO;AAAA,EACT;AAGA,MAAI,QAAQ,WAAW,aAAa;AAClC,WAAO;AAAA,EACT;AAGA,SAAO,yBAAyB,MAAM,QAAQ,IAAI,cAAc,OAAO;AACzE;;;AC3OA,eAAsB,eAAe,MAA4C;AAC/E,QAAM,WAAW,MAAM,KAAK,IAAqB,iBAAiB;AAElE,SAAO;AAAA,IACL,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,EAClD;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,aAAa;AAE1D,SAAO;AAAA,IACL,kBAAkB,SAAS,KAAK,oBAAoB;AAAA,IACpD,aAAa,SAAS,KAAK;AAAA,IAC3B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,cAAc,MAAuC;AACzE,QAAM,WAAW,MAAM,KAAK,IAAgB,YAAY;AAExD,SAAO;AAAA,IACL,iBAAiB,SAAS,KAAK,mBAAmB;AAAA,IAClD,YAAY,SAAS,KAAK;AAAA,IAC1B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,WAAW;AAExD,SAAO;AAAA,IACL,SAAS,SAAS,KAAK,WAAW;AAAA,IAClC,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,mBAAmB,SAAS,KAAK,qBAAqB;AAAA,IACtD,oBAAoB,SAAS,KAAK,sBAAsB;AAAA,IACxD,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,IAChD,mBAAmB,SAAS,KAAK;AAAA,EACnC;AACF;;;ACYO,IAAM,kBAAN,MAAsB;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQjB,YAAY,UAAkC,CAAC,GAAG;AAChD,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI,qBAAqB;AAClE,UAAM,UACJ,QAAQ,UACR,QAAQ,IAAI,qBACZ,4BACA,QAAQ,OAAO,EAAE;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,OAAO,IAAI,WAAW;AAAA,MACzB;AAAA,MACA;AAAA,MACA,WAAW,QAAQ;AAAA,MACnB,YAAY,QAAQ;AAAA,MACpB,eAAe,QAAQ;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CA,MAAM,OAAO,KAAa,SAA4C;AACpE,WAAO,OAAO,KAAK,MAAM,KAAK,OAAO;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4BA,MAAM,iBACJ,MACA,SAC8B;AAC9B,WAAO,iBAAiB,KAAK,MAAM,MAAM,OAAO;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAAwC;AACjE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,kBAAkB,OAAiC;AACvD,WAAO,kBAAkB,KAAK,MAAM,KAAK;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAA6C;AACtE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,YACJ,MACA,SACyB;AACzB,WAAO,YAAY,KAAK,MAAM,MAAM,OAAO;AAAA,EAC7C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+BA,MAAM,WAAW,KAAa,SAAgD;AAC5E,WAAO,WAAW,KAAK,MAAM,KAAK,OAAO;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAAkC;AACrD,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,YAAY,OAAiC;AACjD,WAAO,YAAY,KAAK,MAAM,KAAK;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAA6C;AAChE,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,MAAM,KAAa,SAA2C;AAClE,WAAO,MAAM,KAAK,MAAM,KAAK,OAAO;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA6BA,MAAM,aAAa,SAA0D;AAC3E,WAAO,aAAa,KAAK,MAAM,OAAO;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBAAiB,OAAyC;AAC9D,WAAO,iBAAiB,KAAK,MAAM,KAAK;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;
AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuCA,MAAM,QAAQ,SAA0D;AACtE,WAAO,QAAQ,KAAK,MAAM,OAAO;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuBA,MAAM,IAAI,KAAa,SAA4C;AACjE,WAAO,IAAI,KAAK,MAAM,KAAK,OAAO;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,MAAM,OAAO,OAAe,SAAkD;AAC5E,WAAO,OAAO,KAAK,MAAM,OAAO,OAAO;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,iBAA2C;AAC/C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,gBAAqC;AACzC,WAAO,cAAc,KAAK,IAAI;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AACF;","names":["axios","import_zod_to_json_schema","convertSchema","sleep","import_zod_to_json_schema","convertSchema","sleep"]}
1
+ {"version":3,"sources":["../src/index.ts","../src/utils/httpClient.ts","../src/errors.ts","../src/methods/scrape.ts","../src/methods/crawl.ts","../src/methods/map.ts","../src/methods/search.ts","../src/methods/batch.ts","../src/methods/extract.ts","../src/methods/usage.ts","../src/client.ts"],"sourcesContent":["/**\n * @crawlgate/sdk - Official JavaScript/TypeScript SDK for CrawlGate Search Engine API\n *\n * @packageDocumentation\n * @see https://docs.crawlgate.io\n */\n\n// Main client\nexport { CrawlGateClient, CrawlGateClient as default } from \"./client\";\n\n// Error classes\nexport {\n CrawlGateError,\n AuthenticationError,\n ValidationError,\n JobTimeoutError,\n ServiceUnavailableError,\n RateLimitError,\n ExtractionError,\n} from \"./errors\";\n\n// Types - Client\nexport type { CrawlGateClientOptions } from \"./types\";\n\n// Types - Engine & Options\nexport type {\n Engine,\n ProxyOption,\n FormatType,\n LLMProvider,\n JsonSchema,\n ExtractOptions,\n ExtractResult,\n} from \"./types\";\n\n// Types - Scrape\nexport type {\n ScrapeOptions,\n ScrapeResponse,\n Document,\n DocumentMetadata,\n} from \"./types\";\n\n// Types - Batch Scrape\nexport type {\n BatchScrapeOptions,\n BatchScrapeResponse,\n BatchScrapeJob,\n WebhookConfig,\n} from \"./types\";\n\n// Types - Crawl\nexport type {\n CrawlOptions,\n CrawlResponse,\n CrawlJob,\n CrawlStatus,\n CrawlError,\n CrawlErrorsResponse,\n} from \"./types\";\n\n// Types - Extract\nexport type {\n ExtractRequestOptions,\n ExtractResponse,\n ExtractStatus,\n} from \"./types\";\n\n// Types - Map\nexport type { MapOptions, MapResponse } from \"./types\";\n\n// Types - Search\nexport type {\n SearchOptions,\n SearchResponse,\n SearchResult,\n} from \"./types\";\n\n// Types - Usage & Monitoring\nexport type {\n ConcurrencyInfo,\n CreditUsage,\n TokenUsage,\n QueueStatus,\n} from \"./types\";\n\n// Types - Pagination\nexport type { PaginationConfig } from \"./types\";\n","import axios, {\n type AxiosInstance,\n type AxiosRequestConfig,\n type AxiosResponse,\n} from \"axios\";\nimport { CrawlGateError, parseApiError } from \"../errors\";\n\n/**\n * HTTP Client configuration options\n */\nexport interface HttpClientOptions {\n /**\n * API key for authentication\n */\n apiKey: string;\n\n /**\n * Base URL for the API\n */\n apiUrl: string;\n\n /**\n * Request timeout in milliseconds\n */\n timeoutMs?: number;\n\n /**\n * Maximum number of retries\n */\n maxRetries?: number;\n\n /**\n * Backoff factor for retries (in seconds)\n */\n backoffFactor?: number;\n}\n\n/**\n * HTTP Client with retry logic and error handling\n */\nexport class HttpClient {\n private readonly instance: AxiosInstance;\n private readonly apiKey: string;\n private readonly apiUrl: string;\n private readonly maxRetries: number;\n private readonly backoffFactor: number;\n\n constructor(options: HttpClientOptions) {\n this.apiKey = options.apiKey;\n this.apiUrl = options.apiUrl.replace(/\\/$/, \"\");\n this.maxRetries = options.maxRetries ?? 3;\n this.backoffFactor = options.backoffFactor ?? 0.5;\n\n this.instance = axios.create({\n baseURL: this.apiUrl,\n timeout: options.timeoutMs ?? 
90000,\n headers: {\n \"Content-Type\": \"application/json\",\n \"x-api-key\": this.apiKey,\n },\n });\n }\n\n /**\n * Get the configured API URL\n */\n getApiUrl(): string {\n return this.apiUrl;\n }\n\n /**\n * Get the configured API key\n */\n getApiKey(): string {\n return this.apiKey;\n }\n\n /**\n * Sleep for specified seconds\n */\n private sleep(seconds: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, seconds * 1000));\n }\n\n /**\n * Check if error is retryable\n */\n private isRetryableError(status?: number): boolean {\n return status === 502 || status === 503 || status === 429;\n }\n\n /**\n * Make HTTP request with retry logic\n */\n private async request<T = unknown>(\n config: AxiosRequestConfig\n ): Promise<AxiosResponse<T>> {\n let lastError: Error | undefined;\n\n for (let attempt = 0; attempt < this.maxRetries; attempt++) {\n try {\n // Add SDK origin to request body for tracking\n if (\n config.method &&\n [\"post\", \"put\", \"patch\"].includes(config.method.toLowerCase())\n ) {\n const data = (config.data ?? {}) as Record<string, unknown>;\n config.data = { ...data, origin: \"crawlgate-sdk\" };\n\n // If timeout is specified in body, extend request timeout\n if (typeof data.timeout === \"number\") {\n config.timeout = data.timeout + 5000;\n }\n }\n\n const response = await this.instance.request<T>(config);\n\n // Check for retryable status codes even on \"successful\" responses\n if (\n this.isRetryableError(response.status) &&\n attempt < this.maxRetries - 1\n ) {\n await this.sleep(this.backoffFactor * Math.pow(2, attempt));\n continue;\n }\n\n return response;\n } catch (err: unknown) {\n const axiosError = err as {\n response?: { status: number; data: unknown };\n message?: string;\n };\n\n lastError =\n err instanceof Error ? err : new Error(String(axiosError?.message));\n const status = axiosError?.response?.status;\n\n // Retry on retryable errors\n if (this.isRetryableError(status) && attempt < this.maxRetries - 1) {\n await this.sleep(this.backoffFactor * Math.pow(2, attempt));\n continue;\n }\n\n // Parse and throw appropriate error\n if (axiosError?.response) {\n parseApiError(\n axiosError.response.status,\n axiosError.response.data as {\n error?: string;\n message?: string;\n details?: unknown;\n }\n );\n }\n\n throw lastError;\n }\n }\n\n throw lastError ?? 
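The retry loop above sleeps `backoffFactor * 2^attempt` seconds between attempts and only retries HTTP 429/502/503. A minimal standalone sketch of the resulting schedule, assuming the constructor defaults shown above (`maxRetries = 3`, `backoffFactor = 0.5`):

```typescript
// Reproduces the delay schedule of HttpClient.request above.
// Only attempts that are followed by a retry incur a sleep, so with
// maxRetries = 3 a persistently failing 429/502/503 request sleeps twice.
function backoffDelaysSeconds(maxRetries = 3, backoffFactor = 0.5): number[] {
  const delays: number[] = [];
  for (let attempt = 0; attempt < maxRetries - 1; attempt++) {
    delays.push(backoffFactor * Math.pow(2, attempt)); // 0.5s, then 1s
  }
  return delays;
}

console.log(backoffDelaysSeconds()); // [0.5, 1]
```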
new CrawlGateError(\"Unexpected HTTP client error\");\n }\n\n /**\n * Make POST request\n */\n async post<T = unknown>(\n endpoint: string,\n body: Record<string, unknown>,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"post\",\n url: endpoint,\n data: body,\n headers,\n });\n }\n\n /**\n * Make GET request\n */\n async get<T = unknown>(\n endpoint: string,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"get\",\n url: endpoint,\n headers,\n });\n }\n\n /**\n * Make DELETE request\n */\n async delete<T = unknown>(\n endpoint: string,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"delete\",\n url: endpoint,\n headers,\n });\n }\n}\n","/**\n * Base error class for CrawlGate SDK errors\n */\nexport class CrawlGateError extends Error {\n /**\n * HTTP status code (if applicable)\n */\n public readonly statusCode?: number;\n\n /**\n * Error code for programmatic handling\n */\n public readonly code?: string;\n\n /**\n * Additional error details\n */\n public readonly details?: unknown;\n\n constructor(\n message: string,\n statusCode?: number,\n code?: string,\n details?: unknown\n ) {\n super(message);\n this.name = \"CrawlGateError\";\n this.statusCode = statusCode;\n this.code = code;\n this.details = details;\n\n // Maintains proper stack trace for where error was thrown\n if (Error.captureStackTrace) {\n Error.captureStackTrace(this, CrawlGateError);\n }\n }\n}\n\n/**\n * Error thrown when authentication fails\n */\nexport class AuthenticationError extends CrawlGateError {\n constructor(message: string = \"Invalid API Key\") {\n super(message, 401, \"AUTHENTICATION_ERROR\");\n this.name = \"AuthenticationError\";\n }\n}\n\n/**\n * Error thrown when request validation fails\n */\nexport class ValidationError extends CrawlGateError {\n constructor(message: string, details?: unknown) {\n super(message, 400, \"VALIDATION_ERROR\", details);\n this.name = \"ValidationError\";\n }\n}\n\n/**\n * Error thrown when a crawl job times out\n */\nexport class JobTimeoutError extends CrawlGateError {\n /**\n * Job ID that timed out\n */\n public readonly jobId: string;\n\n /**\n * Timeout duration in seconds\n */\n public readonly timeoutSeconds: number;\n\n constructor(jobId: string, timeoutSeconds: number) {\n super(\n `Crawl job ${jobId} did not complete within ${timeoutSeconds} seconds`,\n undefined,\n \"JOB_TIMEOUT\"\n );\n this.name = \"JobTimeoutError\";\n this.jobId = jobId;\n this.timeoutSeconds = timeoutSeconds;\n }\n}\n\n/**\n * Error thrown when upstream service is unavailable\n */\nexport class ServiceUnavailableError extends CrawlGateError {\n constructor(message: string = \"Service temporarily unavailable\") {\n super(message, 503, \"SERVICE_UNAVAILABLE\");\n this.name = \"ServiceUnavailableError\";\n }\n}\n\n/**\n * Error thrown when rate limit is exceeded\n */\nexport class RateLimitError extends CrawlGateError {\n /**\n * Time to wait before retrying (in seconds)\n */\n public readonly retryAfter?: number;\n\n constructor(message: string = \"Rate limit exceeded\", retryAfter?: number) {\n super(message, 429, \"RATE_LIMIT_EXCEEDED\");\n this.name = \"RateLimitError\";\n this.retryAfter = retryAfter;\n }\n}\n\n/**\n * Error thrown when LLM extraction fails\n */\nexport class ExtractionError extends CrawlGateError {\n /**\n * Provider that failed\n */\n public readonly provider?: string;\n\n constructor(message: 
string, provider?: string) {\n super(message, undefined, \"EXTRACTION_ERROR\");\n this.name = \"ExtractionError\";\n this.provider = provider;\n }\n}\n\n/**\n * Parse error response from API and throw appropriate error\n */\nexport function parseApiError(\n status: number,\n data: { error?: string; message?: string; details?: unknown }\n): never {\n const message = data.error || data.message || \"Unknown error\";\n\n switch (status) {\n case 400:\n throw new ValidationError(message, data.details);\n case 401:\n throw new AuthenticationError(message);\n case 429:\n throw new RateLimitError(message);\n case 502:\n case 503:\n throw new ServiceUnavailableError(message);\n default:\n throw new CrawlGateError(message, status, undefined, data.details);\n }\n}\n","import type { ZodTypeAny } from \"zod\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport type { HttpClient } from \"../utils/httpClient\";\nimport type { ScrapeOptions, ScrapeResponse, Document } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Convert Zod schema to JSON Schema if needed\n */\nfunction convertSchema(\n schema: Record<string, unknown> | ZodTypeAny\n): Record<string, unknown> {\n // Check if it's a Zod schema by looking for _def property\n if (schema && typeof schema === \"object\" && \"_def\" in schema) {\n return zodToJsonSchema(schema as ZodTypeAny) as Record<string, unknown>;\n }\n return schema as Record<string, unknown>;\n}\n\n/**\n * Build request body for scrape endpoint\n */\nfunction buildScrapeBody(\n url: string,\n options?: ScrapeOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.formats) {\n body.formats = options.formats;\n }\n\n if (options?.onlyMainContent !== undefined) {\n body.onlyMainContent = options.onlyMainContent;\n }\n\n if (options?.excludeTags) {\n body.excludeTags = options.excludeTags;\n }\n\n if (options?.waitFor !== undefined) {\n body.waitFor = options.waitFor;\n }\n\n if (options?.timeout !== undefined) {\n body.timeout = options.timeout;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n // Handle LLM extraction\n if (options?.extract) {\n body.extract = {\n schema: convertSchema(options.extract.schema),\n systemPrompt: options.extract.systemPrompt,\n provider: options.extract.provider,\n enableFallback: options.extract.enableFallback,\n };\n\n // Remove undefined values\n Object.keys(body.extract as Record<string, unknown>).forEach((key) => {\n if ((body.extract as Record<string, unknown>)[key] === undefined) {\n delete (body.extract as Record<string, unknown>)[key];\n }\n });\n }\n\n return body;\n}\n\n/**\n * Scrape a single URL\n *\n * @param http - HTTP client instance\n * @param url - URL to scrape\n * @param options - Scrape options\n * @returns Scraped document\n */\nexport async function scrape(\n http: HttpClient,\n url: string,\n options?: ScrapeOptions\n): Promise<Document> {\n const body = buildScrapeBody(url, options);\n\n const response = await http.post<ScrapeResponse | Document>(\"/v1/scrape\", body);\n\n // Handle both wrapped response { success, data } and direct document response\n const data = response.data as Record<string, unknown>;\n\n // Check if it's an error response\n if (data.success === false) {\n throw new CrawlGateError(\n (data.error as string) || \"Scrape failed\",\n undefined,\n (data.code as string) || 
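`parseApiError` above maps HTTP status codes to the typed error classes (400 → `ValidationError`, 401 → `AuthenticationError`, 429 → `RateLimitError`, 502/503 → `ServiceUnavailableError`), all of which `src/index.ts` re-exports. A hedged consumer-side sketch; the API key and URL are placeholders:

```typescript
import {
  CrawlGateClient,
  AuthenticationError,
  RateLimitError,
  ValidationError,
} from "@crawlgate/sdk";

async function main() {
  const client = new CrawlGateClient({ apiKey: "sk_live_example" }); // placeholder key

  try {
    await client.scrape("https://example.com"); // placeholder URL
  } catch (err) {
    if (err instanceof RateLimitError) {
      // 429: retryAfter is optional and may be undefined.
      console.warn(`Rate limited, retry after ${err.retryAfter ?? "?"}s`);
    } else if (err instanceof ValidationError) {
      console.error("Bad request:", err.message, err.details); // 400
    } else if (err instanceof AuthenticationError) {
      console.error("Invalid API key (401), check CRAWLGATE_API_KEY");
    } else {
      throw err; // base CrawlGateError or an unexpected failure
    }
  }
}

main();
```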
\"SCRAPE_ERROR\"\n );\n }\n\n // If response has success: true wrapper, extract data\n if (data.success === true && data.data) {\n return data.data as Document;\n }\n\n // Otherwise, response is the document itself (direct response)\n if (data.url || data.markdown || data.html) {\n return data as unknown as Document;\n }\n\n throw new CrawlGateError(\"No data returned from scrape\", undefined, \"NO_DATA\");\n}\n","import type { HttpClient } from \"../utils/httpClient\";\nimport type { CrawlOptions, CrawlResponse, CrawlJob, CrawlErrorsResponse } from \"../types\";\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\n\n/**\n * Build request body for crawl endpoint\n */\nfunction buildCrawlBody(\n url: string,\n options?: CrawlOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.limit !== undefined) {\n body.limit = options.limit;\n }\n\n if (options?.formats) {\n body.formats = options.formats;\n }\n\n if (options?.onlyMainContent !== undefined) {\n body.onlyMainContent = options.onlyMainContent;\n }\n\n if (options?.excludeTags) {\n body.excludeTags = options.excludeTags;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n return body;\n}\n\n/**\n * Start a crawl job (async)\n *\n * @param http - HTTP client instance\n * @param url - Root URL to crawl\n * @param options - Crawl options\n * @returns Crawl job ID and initial status\n */\nexport async function startCrawl(\n http: HttpClient,\n url: string,\n options?: CrawlOptions\n): Promise<CrawlResponse> {\n const body = buildCrawlBody(url, options);\n\n const response = await http.post<CrawlResponse>(\"/v1/crawl\", body);\n\n if (!response.data.success && !response.data.id) {\n throw new CrawlGateError(\n \"Failed to start crawl job\",\n undefined,\n \"CRAWL_START_ERROR\"\n );\n }\n\n return {\n success: true,\n id: response.data.id,\n jobId: response.data.id,\n status: response.data.status || \"scraping\",\n engine: response.data.engine,\n };\n}\n\n/**\n * Get crawl job status\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns Current job status and data\n */\nexport async function getCrawlStatus(\n http: HttpClient,\n jobId: string\n): Promise<CrawlJob> {\n const response = await http.get<CrawlJob>(`/v1/crawl/${jobId}`);\n\n return {\n id: response.data.id || jobId,\n status: response.data.status,\n total: response.data.total || 0,\n completed: response.data.completed || 0,\n data: response.data.data || [],\n engine: response.data.engine,\n error: response.data.error,\n };\n}\n\n/**\n * Cancel a crawl job\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns True if cancelled successfully\n */\nexport async function cancelCrawl(\n http: HttpClient,\n jobId: string\n): Promise<boolean> {\n const response = await http.delete<{ success?: boolean; message?: string }>(\n `/v1/crawl/${jobId}`\n );\n\n return response.data.success !== false;\n}\n\n/**\n * Sleep for specified milliseconds\n */\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\n/**\n * Crawl a website and wait for completion (waiter pattern)\n *\n * @param http - HTTP client instance\n * @param url - Root URL to crawl\n * @param options - Crawl options including pollInterval and timeout\n * @returns Final crawl job with all data\n 
*/\nexport async function crawl(\n http: HttpClient,\n url: string,\n options?: CrawlOptions\n): Promise<CrawlJob> {\n const pollInterval = options?.pollInterval ?? 2000; // 2 seconds default\n const timeout = options?.timeout ?? 300; // 5 minutes default (in seconds)\n\n // Start the crawl job\n const { id: jobId } = await startCrawl(http, url, options);\n\n const startTime = Date.now();\n const timeoutMs = timeout * 1000;\n\n // Poll until completion or timeout\n while (true) {\n const status = await getCrawlStatus(http, jobId);\n\n // Check for terminal states\n if (status.status === \"completed\") {\n return status;\n }\n\n if (status.status === \"failed\") {\n throw new CrawlGateError(\n status.error || \"Crawl job failed\",\n undefined,\n \"CRAWL_FAILED\"\n );\n }\n\n if (status.status === \"cancelled\") {\n throw new CrawlGateError(\n \"Crawl job was cancelled\",\n undefined,\n \"CRAWL_CANCELLED\"\n );\n }\n\n // Check for timeout\n if (Date.now() - startTime > timeoutMs) {\n throw new JobTimeoutError(jobId, timeout);\n }\n\n // Wait before next poll\n await sleep(pollInterval);\n }\n}\n\n/**\n * Get crawl job errors and robots.txt blocks\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns Errors and robots.txt blocked URLs\n */\nexport async function getCrawlErrors(\n http: HttpClient,\n jobId: string\n): Promise<CrawlErrorsResponse> {\n const response = await http.get<{\n success?: boolean;\n data?: { errors: Array<Record<string, string>>; robotsBlocked: string[] };\n errors?: Array<Record<string, string>>;\n robotsBlocked?: string[];\n }>(`/v1/crawl/${jobId}/errors`);\n\n const payload = response.data.data ?? response.data;\n return {\n errors: (payload.errors || []).map((e) => ({\n id: e.id || \"\",\n timestamp: e.timestamp,\n url: e.url || \"\",\n code: e.code,\n error: e.error || e.message || \"Unknown error\",\n })),\n robotsBlocked: payload.robotsBlocked || [],\n };\n}\n","import type { HttpClient } from \"../utils/httpClient\";\nimport type { MapOptions, MapResponse } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Build request body for map endpoint\n */\nfunction buildMapBody(\n url: string,\n options?: MapOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n return body;\n}\n\n/**\n * Map a website to discover all URLs\n *\n * @param http - HTTP client instance\n * @param url - Root URL to map\n * @param options - Map options\n * @returns List of discovered URLs\n */\nexport async function map(\n http: HttpClient,\n url: string,\n options?: MapOptions\n): Promise<MapResponse> {\n const body = buildMapBody(url, options);\n\n const response = await http.post<MapResponse>(\"/v1/map\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Map failed\",\n undefined,\n \"MAP_ERROR\"\n );\n }\n\n return {\n success: true,\n links: response.data.links || [],\n count: response.data.count || response.data.links?.length || 0,\n engine: response.data.engine,\n };\n}\n","import type { ZodTypeAny } from \"zod\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport type { HttpClient } from \"../utils/httpClient\";\nimport type { SearchOptions, SearchResponse } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * 
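`crawl()` above polls `getCrawlStatus` every `pollInterval` milliseconds (default 2000) until the job completes, fails, or is cancelled, and throws `JobTimeoutError` once `timeout` seconds (default 300) have elapsed. A sketch that also cancels the server-side job on timeout; the URL and key are placeholders:

```typescript
import { CrawlGateClient, JobTimeoutError } from "@crawlgate/sdk";

const client = new CrawlGateClient({ apiKey: "sk_live_example" }); // placeholder

async function crawlSite() {
  try {
    const job = await client.crawl("https://example.com", {
      limit: 10,
      pollInterval: 2000, // poll every 2s
      timeout: 120,       // give up after 2 minutes
    });
    console.log(`Crawled ${job.completed}/${job.total} pages`);
  } catch (err) {
    if (err instanceof JobTimeoutError) {
      // The job keeps running server-side after the waiter times out,
      // so it can still be cancelled explicitly.
      await client.cancelCrawl(err.jobId);
    } else {
      throw err;
    }
  }
}

crawlSite();
```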
Convert Zod schema to JSON Schema if needed\n */\nfunction convertSchema(\n schema: Record<string, unknown> | ZodTypeAny\n): Record<string, unknown> {\n if (schema && typeof schema === \"object\" && \"_def\" in schema) {\n return zodToJsonSchema(schema as ZodTypeAny) as Record<string, unknown>;\n }\n return schema as Record<string, unknown>;\n}\n\n/**\n * Build request body for search endpoint\n */\nfunction buildSearchBody(\n query: string,\n options?: SearchOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { query };\n\n if (options?.limit !== undefined) {\n body.limit = options.limit;\n }\n\n if (options?.lang) {\n body.lang = options.lang;\n }\n\n if (options?.country) {\n body.country = options.country;\n }\n\n if (options?.engines) {\n body.engines = options.engines;\n }\n\n if (options?.scrapeOptions) {\n body.scrapeOptions = options.scrapeOptions;\n }\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n // Handle LLM extraction\n if (options?.extract) {\n body.extract = {\n schema: convertSchema(options.extract.schema),\n systemPrompt: options.extract.systemPrompt,\n provider: options.extract.provider,\n enableFallback: options.extract.enableFallback,\n };\n\n // Remove undefined values\n Object.keys(body.extract as Record<string, unknown>).forEach((key) => {\n if ((body.extract as Record<string, unknown>)[key] === undefined) {\n delete (body.extract as Record<string, unknown>)[key];\n }\n });\n }\n\n return body;\n}\n\n/**\n * Search the web and optionally scrape results\n *\n * @param http - HTTP client instance\n * @param query - Search query\n * @param options - Search options\n * @returns Search results with optional scraped content\n */\nexport async function search(\n http: HttpClient,\n query: string,\n options?: SearchOptions\n): Promise<SearchResponse> {\n const body = buildSearchBody(query, options);\n\n const response = await http.post<SearchResponse>(\"/v1/search\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Search failed\",\n undefined,\n \"SEARCH_ERROR\"\n );\n }\n\n return {\n success: true,\n data: response.data.data || [],\n query: response.data.query || query,\n totalResults: response.data.totalResults,\n searchTime: response.data.searchTime,\n extract: response.data.extract,\n };\n}\n","import type { HttpClient } from \"../utils/httpClient\";\r\nimport type {\r\n BatchScrapeOptions,\r\n BatchScrapeResponse,\r\n BatchScrapeJob,\r\n CrawlErrorsResponse,\r\n Document,\r\n ScrapeOptions,\r\n} from \"../types\";\r\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\r\n\r\n/**\r\n * Build request body for batch scrape endpoint\r\n */\r\nfunction buildBatchBody(\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Record<string, unknown> {\r\n const body: Record<string, unknown> = { urls };\r\n\r\n if (options?.options) {\r\n // Spread scrape options into body\r\n const scrapeOpts = options.options;\r\n if (scrapeOpts.engine) body.engine = scrapeOpts.engine;\r\n if (scrapeOpts.formats) body.formats = scrapeOpts.formats;\r\n if (scrapeOpts.onlyMainContent !== undefined) body.onlyMainContent = scrapeOpts.onlyMainContent;\r\n if (scrapeOpts.excludeTags) body.excludeTags = scrapeOpts.excludeTags;\r\n if (scrapeOpts.waitFor !== undefined) body.waitFor = scrapeOpts.waitFor;\r\n if (scrapeOpts.timeout !== undefined) body.timeout = scrapeOpts.timeout;\r\n if (scrapeOpts.proxy) body.proxy = 
scrapeOpts.proxy;\r\n }\r\n\r\n if (options?.webhook != null) {\r\n body.webhook = options.webhook;\r\n }\r\n\r\n if (options?.appendToId != null) {\r\n body.appendToId = options.appendToId;\r\n }\r\n\r\n if (options?.ignoreInvalidURLs != null) {\r\n body.ignoreInvalidURLs = options.ignoreInvalidURLs;\r\n }\r\n\r\n if (options?.maxConcurrency != null) {\r\n body.maxConcurrency = options.maxConcurrency;\r\n }\r\n\r\n if (options?.projectId) {\r\n body.project_id = options.projectId;\r\n }\r\n\r\n return body;\r\n}\r\n\r\n/**\r\n * Start a batch scrape job (async)\r\n *\r\n * @param http - HTTP client instance\r\n * @param urls - Array of URLs to scrape\r\n * @param options - Batch scrape options\r\n * @returns Batch job ID and initial status\r\n */\r\nexport async function startBatchScrape(\r\n http: HttpClient,\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Promise<BatchScrapeResponse> {\r\n if (!Array.isArray(urls) || urls.length === 0) {\r\n throw new CrawlGateError(\"URLs array cannot be empty\", 400, \"VALIDATION_ERROR\");\r\n }\r\n\r\n const body = buildBatchBody(urls, options);\r\n\r\n const headers: Record<string, string> = {};\r\n if (options?.idempotencyKey) {\r\n headers[\"Idempotency-Key\"] = options.idempotencyKey;\r\n }\r\n\r\n const response = await http.post<BatchScrapeResponse>(\r\n \"/v1/batch/scrape\",\r\n body,\r\n Object.keys(headers).length > 0 ? headers : undefined\r\n );\r\n\r\n if (!response.data.success && !response.data.id) {\r\n throw new CrawlGateError(\r\n response.data.error || \"Failed to start batch scrape job\",\r\n undefined,\r\n \"BATCH_START_ERROR\"\r\n );\r\n }\r\n\r\n return {\r\n success: true,\r\n id: response.data.id,\r\n url: response.data.url,\r\n invalidURLs: response.data.invalidURLs,\r\n };\r\n}\r\n\r\n/**\r\n * Get batch scrape job status\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns Current job status and data\r\n */\r\nexport async function getBatchScrapeStatus(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<BatchScrapeJob> {\r\n const response = await http.get<BatchScrapeJob & { success?: boolean }>(\r\n `/v1/batch/scrape/${jobId}`\r\n );\r\n\r\n return {\r\n id: response.data.id || jobId,\r\n status: response.data.status,\r\n total: response.data.total || 0,\r\n completed: response.data.completed || 0,\r\n creditsUsed: response.data.creditsUsed,\r\n expiresAt: response.data.expiresAt,\r\n next: response.data.next ?? 
null,\r\n data: response.data.data || [],\r\n error: response.data.error,\r\n };\r\n}\r\n\r\n/**\r\n * Cancel a batch scrape job\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns True if cancelled successfully\r\n */\r\nexport async function cancelBatchScrape(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<boolean> {\r\n const response = await http.delete<{ success?: boolean; status?: string }>(\r\n `/v1/batch/scrape/${jobId}`\r\n );\r\n\r\n return response.data.status === \"cancelled\" || response.data.success !== false;\r\n}\r\n\r\n/**\r\n * Get batch scrape errors\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns Errors and robots.txt blocks\r\n */\r\nexport async function getBatchScrapeErrors(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<CrawlErrorsResponse> {\r\n const response = await http.get<{\r\n success?: boolean;\r\n data?: { errors: Array<Record<string, string>>; robotsBlocked: string[] };\r\n errors?: Array<Record<string, string>>;\r\n robotsBlocked?: string[];\r\n }>(`/v1/batch/scrape/${jobId}/errors`);\r\n\r\n const payload = response.data.data ?? response.data;\r\n return {\r\n errors: (payload.errors || []).map((e) => ({\r\n id: e.id || \"\",\r\n timestamp: e.timestamp,\r\n url: e.url || \"\",\r\n code: e.code,\r\n error: e.error || e.message || \"Unknown error\",\r\n })),\r\n robotsBlocked: payload.robotsBlocked || [],\r\n };\r\n}\r\n\r\n/**\r\n * Sleep for specified milliseconds\r\n */\r\nfunction sleep(ms: number): Promise<void> {\r\n return new Promise((resolve) => setTimeout(resolve, ms));\r\n}\r\n\r\n/**\r\n * Wait for batch scrape job completion\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @param pollInterval - Poll interval in milliseconds\r\n * @param timeout - Maximum wait time in seconds\r\n * @returns Final job status with all data\r\n */\r\nasync function waitForBatchCompletion(\r\n http: HttpClient,\r\n jobId: string,\r\n pollInterval: number = 2000,\r\n timeout?: number\r\n): Promise<BatchScrapeJob> {\r\n const startTime = Date.now();\r\n const timeoutMs = timeout ? timeout * 1000 : undefined;\r\n\r\n while (true) {\r\n const status = await getBatchScrapeStatus(http, jobId);\r\n\r\n // Check for terminal states\r\n if (status.status === \"completed\") {\r\n return status;\r\n }\r\n\r\n if (status.status === \"failed\") {\r\n throw new CrawlGateError(\r\n status.error || \"Batch scrape job failed\",\r\n undefined,\r\n \"BATCH_FAILED\"\r\n );\r\n }\r\n\r\n if (status.status === \"cancelled\") {\r\n throw new CrawlGateError(\r\n \"Batch scrape job was cancelled\",\r\n undefined,\r\n \"BATCH_CANCELLED\"\r\n );\r\n }\r\n\r\n // Check for timeout\r\n if (timeoutMs && Date.now() - startTime > timeoutMs) {\r\n throw new JobTimeoutError(jobId, timeout!);\r\n }\r\n\r\n // Wait before next poll\r\n await sleep(Math.max(1000, pollInterval));\r\n }\r\n}\r\n\r\n/**\r\n * Batch scrape multiple URLs and wait for completion (waiter pattern)\r\n *\r\n * @param http - HTTP client instance\r\n * @param urls - Array of URLs to scrape\r\n * @param options - Batch scrape options including pollInterval and timeout\r\n * @returns Final job with all scraped data\r\n */\r\nexport async function batchScrape(\r\n http: HttpClient,\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Promise<BatchScrapeJob> {\r\n const pollInterval = options?.pollInterval ?? 
2000;\r\n const timeout = options?.timeout;\r\n\r\n // Start the batch job\r\n const { id: jobId } = await startBatchScrape(http, urls, options);\r\n\r\n // Wait for completion\r\n return waitForBatchCompletion(http, jobId, pollInterval, timeout);\r\n}\r\n\r\n/**\r\n * Split URLs into chunks for large batch operations\r\n *\r\n * @param urls - Array of URLs\r\n * @param chunkSize - Maximum URLs per chunk (default: 100)\r\n * @returns Array of URL chunks\r\n */\r\nexport function chunkUrls(urls: string[], chunkSize: number = 100): string[][] {\r\n const chunks: string[][] = [];\r\n for (let i = 0; i < urls.length; i += chunkSize) {\r\n chunks.push(urls.slice(i, i + chunkSize));\r\n }\r\n return chunks;\r\n}\r\n","import type { ZodTypeAny } from \"zod\";\r\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\r\nimport type { HttpClient } from \"../utils/httpClient\";\r\nimport type { ExtractRequestOptions, ExtractResponse, ScrapeOptions } from \"../types\";\r\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\r\n\r\n/**\r\n * Check if value is a Zod schema\r\n */\r\nfunction isZodSchema(value: unknown): value is ZodTypeAny {\r\n return (\r\n value !== null &&\r\n typeof value === \"object\" &&\r\n \"_def\" in value &&\r\n (typeof (value as any).safeParse === \"function\" || typeof (value as any).parse === \"function\")\r\n );\r\n}\r\n\r\n/**\r\n * Convert Zod schema to JSON Schema if needed\r\n */\r\nfunction convertSchema(\r\n schema: Record<string, unknown> | ZodTypeAny\r\n): Record<string, unknown> {\r\n if (isZodSchema(schema)) {\r\n return zodToJsonSchema(schema) as Record<string, unknown>;\r\n }\r\n return schema as Record<string, unknown>;\r\n}\r\n\r\n/**\r\n * Build request body for extract endpoint\r\n */\r\nfunction buildExtractBody(\r\n options: ExtractRequestOptions\r\n): Record<string, unknown> {\r\n const body: Record<string, unknown> = {};\r\n\r\n if (options.urls) {\r\n body.urls = options.urls;\r\n }\r\n\r\n if (options.prompt != null) {\r\n body.prompt = options.prompt;\r\n }\r\n\r\n if (options.schema != null) {\r\n body.schema = convertSchema(options.schema);\r\n }\r\n\r\n if (options.systemPrompt != null) {\r\n body.systemPrompt = options.systemPrompt;\r\n }\r\n\r\n if (options.allowExternalLinks != null) {\r\n body.allowExternalLinks = options.allowExternalLinks;\r\n }\r\n\r\n if (options.enableWebSearch != null) {\r\n body.enableWebSearch = options.enableWebSearch;\r\n }\r\n\r\n if (options.showSources != null) {\r\n body.showSources = options.showSources;\r\n }\r\n\r\n if (options.ignoreInvalidURLs != null) {\r\n body.ignoreInvalidURLs = options.ignoreInvalidURLs;\r\n }\r\n\r\n if (options.provider) {\r\n body.provider = options.provider;\r\n }\r\n\r\n if (options.projectId) {\r\n body.project_id = options.projectId;\r\n }\r\n\r\n if (options.scrapeOptions) {\r\n body.scrapeOptions = options.scrapeOptions;\r\n }\r\n\r\n return body;\r\n}\r\n\r\n/**\r\n * Start an extract job (async)\r\n *\r\n * @param http - HTTP client instance\r\n * @param options - Extract request options\r\n * @returns Extract job ID or immediate result\r\n */\r\nexport async function startExtract(\r\n http: HttpClient,\r\n options: ExtractRequestOptions\r\n): Promise<ExtractResponse> {\r\n const body = buildExtractBody(options);\r\n\r\n const response = await http.post<ExtractResponse>(\"/v1/extract\", body);\r\n\r\n if (response.data.success === false && response.data.error) {\r\n throw new CrawlGateError(\r\n response.data.error,\r\n undefined,\r\n \"EXTRACT_ERROR\"\r\n );\r\n 
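`chunkUrls` above is a plain helper for splitting large URL lists, but note that `src/index.ts` (shown earlier in this map) only re-exports the client and error classes, so the sketch below carries a local copy rather than importing it. All values are placeholders:

```typescript
import { CrawlGateClient } from "@crawlgate/sdk";

const client = new CrawlGateClient({ apiKey: "sk_live_example" }); // placeholder

// Local copy of chunkUrls, since the package root does not re-export it.
function chunkUrls(urls: string[], chunkSize = 100): string[][] {
  const chunks: string[][] = [];
  for (let i = 0; i < urls.length; i += chunkSize) {
    chunks.push(urls.slice(i, i + chunkSize));
  }
  return chunks;
}

async function scrapeMany(urls: string[]) {
  // Run one batch job per 100-URL chunk, waiting for each to finish.
  for (const chunk of chunkUrls(urls, 100)) {
    const job = await client.batchScrape(chunk, {
      options: { formats: ["markdown"] },
      pollInterval: 2000,
    });
    console.log(`Chunk done: ${job.completed}/${job.total}`);
  }
}
```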
}\r\n\r\n return response.data;\r\n}\r\n\r\n/**\r\n * Get extract job status\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Extract job ID\r\n * @returns Current job status and data\r\n */\r\nexport async function getExtractStatus(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<ExtractResponse> {\r\n const response = await http.get<ExtractResponse>(`/v1/extract/${jobId}`);\r\n\r\n if (response.data.success === false && response.data.error) {\r\n throw new CrawlGateError(\r\n response.data.error,\r\n undefined,\r\n \"EXTRACT_STATUS_ERROR\"\r\n );\r\n }\r\n\r\n return response.data;\r\n}\r\n\r\n/**\r\n * Sleep for specified milliseconds\r\n */\r\nfunction sleep(ms: number): Promise<void> {\r\n return new Promise((resolve) => setTimeout(resolve, ms));\r\n}\r\n\r\n/**\r\n * Wait for extract job completion\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Extract job ID\r\n * @param pollInterval - Poll interval in milliseconds\r\n * @param timeout - Maximum wait time in seconds\r\n * @returns Final extract result\r\n */\r\nasync function waitForExtractCompletion(\r\n http: HttpClient,\r\n jobId: string,\r\n pollInterval: number = 2000,\r\n timeout?: number\r\n): Promise<ExtractResponse> {\r\n const startTime = Date.now();\r\n const timeoutMs = timeout ? timeout * 1000 : undefined;\r\n\r\n while (true) {\r\n const status = await getExtractStatus(http, jobId);\r\n\r\n // Check for terminal states\r\n if (status.status === \"completed\") {\r\n return status;\r\n }\r\n\r\n if (status.status === \"failed\") {\r\n throw new CrawlGateError(\r\n status.error || \"Extract job failed\",\r\n undefined,\r\n \"EXTRACT_FAILED\"\r\n );\r\n }\r\n\r\n if (status.status === \"cancelled\") {\r\n throw new CrawlGateError(\r\n \"Extract job was cancelled\",\r\n undefined,\r\n \"EXTRACT_CANCELLED\"\r\n );\r\n }\r\n\r\n // Check for timeout\r\n if (timeoutMs && Date.now() - startTime > timeoutMs) {\r\n throw new JobTimeoutError(jobId, timeout!);\r\n }\r\n\r\n // Wait before next poll\r\n await sleep(Math.max(1000, pollInterval));\r\n }\r\n}\r\n\r\n/**\r\n * Extract structured data from URLs using LLM (waiter pattern)\r\n *\r\n * This method starts an extract job and polls until completion.\r\n * For synchronous extracts (small payloads), it may return immediately.\r\n *\r\n * @param http - HTTP client instance\r\n * @param options - Extract request options including pollInterval and timeout\r\n * @returns Final extract result with structured data\r\n *\r\n * @example\r\n * ```typescript\r\n * import { z } from 'zod';\r\n *\r\n * const result = await extract(http, {\r\n * urls: ['https://example.com/product'],\r\n * schema: z.object({\r\n * name: z.string(),\r\n * price: z.number(),\r\n * inStock: z.boolean()\r\n * }),\r\n * systemPrompt: 'Extract product information',\r\n * provider: 'openai'\r\n * });\r\n *\r\n * console.log(result.data);\r\n * ```\r\n */\r\nexport async function extract(\r\n http: HttpClient,\r\n options: ExtractRequestOptions\r\n): Promise<ExtractResponse> {\r\n const pollInterval = options.pollInterval ?? 
2000;\r\n const timeout = options.timeout;\r\n\r\n // Start the extract job\r\n const started = await startExtract(http, options);\r\n\r\n // If no job ID, it was a synchronous response\r\n if (!started.id) {\r\n return started;\r\n }\r\n\r\n // If already completed, return immediately\r\n if (started.status === \"completed\") {\r\n return started;\r\n }\r\n\r\n // Wait for completion\r\n return waitForExtractCompletion(http, started.id, pollInterval, timeout);\r\n}\r\n","import type { HttpClient } from \"../utils/httpClient\";\r\nimport type { ConcurrencyInfo, CreditUsage, TokenUsage, QueueStatus } from \"../types\";\r\n\r\n/**\r\n * Get current concurrency usage\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Current and max concurrency\r\n */\r\nexport async function getConcurrency(http: HttpClient): Promise<ConcurrencyInfo> {\r\n const response = await http.get<ConcurrencyInfo>(\"/v1/concurrency\");\r\n\r\n return {\r\n concurrency: response.data.concurrency ?? 0,\r\n maxConcurrency: response.data.maxConcurrency ?? 0,\r\n };\r\n}\r\n\r\n/**\r\n * Get current credit usage\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Credit usage information\r\n */\r\nexport async function getCreditUsage(http: HttpClient): Promise<CreditUsage> {\r\n const response = await http.get<CreditUsage>(\"/v1/credits\");\r\n\r\n return {\r\n remainingCredits: response.data.remainingCredits ?? 0,\r\n planCredits: response.data.planCredits,\r\n billingPeriodStart: response.data.billingPeriodStart,\r\n billingPeriodEnd: response.data.billingPeriodEnd,\r\n };\r\n}\r\n\r\n/**\r\n * Get current token usage (for LLM extraction)\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Token usage information\r\n */\r\nexport async function getTokenUsage(http: HttpClient): Promise<TokenUsage> {\r\n const response = await http.get<TokenUsage>(\"/v1/tokens\");\r\n\r\n return {\r\n remainingTokens: response.data.remainingTokens ?? 0,\r\n planTokens: response.data.planTokens,\r\n billingPeriodStart: response.data.billingPeriodStart,\r\n billingPeriodEnd: response.data.billingPeriodEnd,\r\n };\r\n}\r\n\r\n/**\r\n * Get queue status information\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Queue status metrics\r\n */\r\nexport async function getQueueStatus(http: HttpClient): Promise<QueueStatus> {\r\n const response = await http.get<QueueStatus>(\"/v1/queue\");\r\n\r\n return {\r\n success: response.data.success ?? true,\r\n jobsInQueue: response.data.jobsInQueue ?? 0,\r\n activeJobsInQueue: response.data.activeJobsInQueue ?? 0,\r\n waitingJobsInQueue: response.data.waitingJobsInQueue ?? 0,\r\n maxConcurrency: response.data.maxConcurrency ?? 
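`extract()` above short-circuits when the API answers synchronously (no job `id`, or `status` already `"completed"`) and otherwise polls `/v1/extract/{id}` until a terminal state. A sketch with a Zod schema, mirroring the embedded `@example`; the URL and key are placeholders:

```typescript
import { z } from "zod";
import { CrawlGateClient } from "@crawlgate/sdk";

const client = new CrawlGateClient({ apiKey: "sk_live_example" }); // placeholder

async function extractProduct() {
  // The Zod schema is converted to JSON Schema by convertSchema() above
  // before the request is sent.
  const result = await client.extract({
    urls: ["https://example.com/product"], // placeholder URL
    schema: z.object({ name: z.string(), price: z.number() }),
    provider: "openai",
    timeout: 60, // waiter gives up (JobTimeoutError) after 60s
  });
  console.log(result.data);
}

extractProduct();
```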
0,\r\n mostRecentSuccess: response.data.mostRecentSuccess,\r\n };\r\n}\r\n","import { HttpClient } from \"./utils/httpClient\";\nimport { scrape } from \"./methods/scrape\";\nimport { startCrawl, getCrawlStatus, cancelCrawl, crawl, getCrawlErrors } from \"./methods/crawl\";\nimport { map } from \"./methods/map\";\nimport { search } from \"./methods/search\";\nimport {\n startBatchScrape,\n getBatchScrapeStatus,\n cancelBatchScrape,\n batchScrape,\n getBatchScrapeErrors,\n} from \"./methods/batch\";\nimport { startExtract, getExtractStatus, extract } from \"./methods/extract\";\nimport { getConcurrency, getCreditUsage, getTokenUsage, getQueueStatus } from \"./methods/usage\";\nimport type {\n CrawlGateClientOptions,\n ScrapeOptions,\n CrawlOptions,\n MapOptions,\n SearchOptions,\n Document,\n CrawlResponse,\n CrawlJob,\n CrawlErrorsResponse,\n MapResponse,\n SearchResponse,\n BatchScrapeOptions,\n BatchScrapeResponse,\n BatchScrapeJob,\n ExtractRequestOptions,\n ExtractResponse,\n ConcurrencyInfo,\n CreditUsage,\n TokenUsage,\n QueueStatus,\n} from \"./types\";\nimport { CrawlGateError } from \"./errors\";\n\n/**\n * CrawlGate SDK Client\n *\n * @example\n * ```typescript\n * import { CrawlGateClient } from '@crawlgate/sdk';\n *\n * const client = new CrawlGateClient({\n * apiKey: 'sk_live_...',\n * apiUrl: 'https://api.crawlgate.io'\n * });\n *\n * // Scrape a single URL\n * const doc = await client.scrape('https://example.com', {\n * engine: 'smart',\n * formats: ['markdown', 'html']\n * });\n *\n * // Batch scrape multiple URLs\n * const job = await client.batchScrape(['https://a.com', 'https://b.com'], {\n * options: { formats: ['markdown'] }\n * });\n *\n * // Crawl a website\n * const crawlJob = await client.crawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic'\n * });\n *\n * // Extract structured data with LLM\n * const extracted = await client.extract({\n * urls: ['https://example.com/product'],\n * schema: { name: 'string', price: 'number' },\n * provider: 'openai'\n * });\n *\n * // Search the web\n * const results = await client.search('best restaurants', {\n * limit: 5,\n * scrapeOptions: { formats: ['markdown'] }\n * });\n * ```\n */\nexport class CrawlGateClient {\n private readonly http: HttpClient;\n\n /**\n * Create a new CrawlGate client\n *\n * @param options - Client configuration options\n * @throws {CrawlGateError} If API key is not provided\n */\n constructor(options: CrawlGateClientOptions = {}) {\n const apiKey = options.apiKey ?? process.env.CRAWLGATE_API_KEY ?? \"\";\n const apiUrl = (\n options.apiUrl ??\n process.env.CRAWLGATE_API_URL ??\n \"https://api.crawlgate.io\"\n ).replace(/\\/$/, \"\");\n\n if (!apiKey) {\n throw new CrawlGateError(\n \"API key is required. 
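The four usage helpers above each wrap a single GET endpoint and default missing numeric fields to 0. The client presumably exposes them as same-named methods — its import list above suggests so, though client.ts is cut off before that point in this view — so the sketch below is an assumption on that basis:

```typescript
import { CrawlGateClient } from "@crawlgate/sdk";

const client = new CrawlGateClient({ apiKey: "sk_live_example" }); // placeholder

async function headroom() {
  // Assumes the client wraps the usage helpers as methods of the same names.
  const [conc, credits, queue] = await Promise.all([
    client.getConcurrency(),  // GET /v1/concurrency
    client.getCreditUsage(),  // GET /v1/credits
    client.getQueueStatus(),  // GET /v1/queue
  ]);

  console.log(`${conc.concurrency}/${conc.maxConcurrency} concurrency in use`);
  console.log(`${credits.remainingCredits} credits remaining`);
  console.log(`${queue.jobsInQueue} jobs in queue`);
}

headroom();
```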
Set CRAWLGATE_API_KEY env variable or pass apiKey option.\",\n undefined,\n \"MISSING_API_KEY\"\n );\n }\n\n this.http = new HttpClient({\n apiKey,\n apiUrl,\n timeoutMs: options.timeoutMs,\n maxRetries: options.maxRetries,\n backoffFactor: options.backoffFactor,\n });\n }\n\n // ==========================================================================\n // Scrape Methods\n // ==========================================================================\n\n /**\n * Scrape a single URL\n *\n * @param url - URL to scrape\n * @param options - Scrape options\n * @returns Scraped document with requested formats\n *\n * @example\n * ```typescript\n * const doc = await client.scrape('https://example.com', {\n * engine: 'smart',\n * formats: ['markdown', 'html'],\n * onlyMainContent: true\n * });\n * console.log(doc.markdown);\n * ```\n *\n * @example With LLM extraction\n * ```typescript\n * import { z } from 'zod';\n *\n * const schema = z.object({\n * title: z.string(),\n * price: z.number(),\n * inStock: z.boolean()\n * });\n *\n * const doc = await client.scrape('https://example.com/product', {\n * engine: 'smart',\n * extract: {\n * schema,\n * systemPrompt: 'Extract product details',\n * provider: 'openai'\n * }\n * });\n * console.log(doc.extract?.data);\n * ```\n */\n async scrape(url: string, options?: ScrapeOptions): Promise<Document> {\n return scrape(this.http, url, options);\n }\n\n // ==========================================================================\n // Batch Scrape Methods\n // ==========================================================================\n\n /**\n * Start a batch scrape job (async)\n *\n * @param urls - Array of URLs to scrape\n * @param options - Batch scrape options\n * @returns Batch job ID and initial status\n *\n * @example\n * ```typescript\n * const { id } = await client.startBatchScrape(\n * ['https://a.com', 'https://b.com', 'https://c.com'],\n * { options: { formats: ['markdown'] } }\n * );\n *\n * // Poll manually\n * let status = await client.getBatchScrapeStatus(id);\n * while (status.status === 'scraping') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getBatchScrapeStatus(id);\n * }\n * ```\n */\n async startBatchScrape(\n urls: string[],\n options?: BatchScrapeOptions\n ): Promise<BatchScrapeResponse> {\n return startBatchScrape(this.http, urls, options);\n }\n\n /**\n * Get batch scrape job status and data\n *\n * @param jobId - Batch job ID\n * @returns Current job status and scraped data\n */\n async getBatchScrapeStatus(jobId: string): Promise<BatchScrapeJob> {\n return getBatchScrapeStatus(this.http, jobId);\n }\n\n /**\n * Cancel a batch scrape job\n *\n * @param jobId - Batch job ID\n * @returns True if cancelled successfully\n */\n async cancelBatchScrape(jobId: string): Promise<boolean> {\n return cancelBatchScrape(this.http, jobId);\n }\n\n /**\n * Get batch scrape job errors\n *\n * @param jobId - Batch job ID\n * @returns Errors and robots.txt blocked URLs\n */\n async getBatchScrapeErrors(jobId: string): Promise<CrawlErrorsResponse> {\n return getBatchScrapeErrors(this.http, jobId);\n }\n\n /**\n * Batch scrape multiple URLs and wait for completion\n *\n * @param urls - Array of URLs to scrape\n * @param options - Batch options including pollInterval and timeout\n * @returns Final job with all scraped data\n *\n * @example\n * ```typescript\n * const job = await client.batchScrape(\n * ['https://a.com', 'https://b.com', 'https://c.com'],\n * {\n * options: { formats: ['markdown'], engine: 'smart' },\n 
* pollInterval: 2000,\n * timeout: 300\n * }\n * );\n *\n * console.log(`Scraped ${job.completed} URLs`);\n * job.data.forEach(doc => console.log(doc.url, doc.markdown?.length));\n * ```\n */\n async batchScrape(\n urls: string[],\n options?: BatchScrapeOptions\n ): Promise<BatchScrapeJob> {\n return batchScrape(this.http, urls, options);\n }\n\n // ==========================================================================\n // Crawl Methods\n // ==========================================================================\n\n /**\n * Start a crawl job (async)\n *\n * Use this method when you want to start a crawl and manage polling yourself.\n * For automatic polling, use the `crawl()` method instead.\n *\n * @param url - Root URL to crawl\n * @param options - Crawl options\n * @returns Crawl job ID and initial status\n *\n * @example\n * ```typescript\n * const { id } = await client.startCrawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic'\n * });\n *\n * // Poll for status manually\n * let status = await client.getCrawlStatus(id);\n * while (status.status === 'scraping') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getCrawlStatus(id);\n * }\n * ```\n */\n async startCrawl(url: string, options?: CrawlOptions): Promise<CrawlResponse> {\n return startCrawl(this.http, url, options);\n }\n\n /**\n * Get crawl job status and data\n *\n * @param jobId - Crawl job ID\n * @returns Current job status and scraped data\n */\n async getCrawlStatus(jobId: string): Promise<CrawlJob> {\n return getCrawlStatus(this.http, jobId);\n }\n\n /**\n * Cancel a crawl job\n *\n * @param jobId - Crawl job ID\n * @returns True if cancelled successfully\n */\n async cancelCrawl(jobId: string): Promise<boolean> {\n return cancelCrawl(this.http, jobId);\n }\n\n /**\n * Get crawl job errors and robots.txt blocks\n *\n * @param jobId - Crawl job ID\n * @returns Errors and robots.txt blocked URLs\n */\n async getCrawlErrors(jobId: string): Promise<CrawlErrorsResponse> {\n return getCrawlErrors(this.http, jobId);\n }\n\n /**\n * Crawl a website and wait for completion\n *\n * This method starts a crawl job and automatically polls until completion.\n *\n * @param url - Root URL to crawl\n * @param options - Crawl options including pollInterval and timeout\n * @returns Final crawl job with all scraped data\n *\n * @example\n * ```typescript\n * const job = await client.crawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic',\n * formats: ['markdown'],\n * pollInterval: 2000, // Poll every 2 seconds\n * timeout: 300 // 5 minute timeout\n * });\n *\n * console.log(`Crawled ${job.completed} pages`);\n * job.data.forEach(doc => console.log(doc.url));\n * ```\n */\n async crawl(url: string, options?: CrawlOptions): Promise<CrawlJob> {\n return crawl(this.http, url, options);\n }\n\n // ==========================================================================\n // Extract Methods (Standalone LLM Extraction)\n // ==========================================================================\n\n /**\n * Start an extract job (async)\n *\n * @param options - Extract request options\n * @returns Extract job ID or immediate result\n *\n * @example\n * ```typescript\n * const { id } = await client.startExtract({\n * urls: ['https://example.com/product'],\n * schema: { name: 'string', price: 'number' },\n * provider: 'openai'\n * });\n *\n * // Poll manually\n * let status = await client.getExtractStatus(id);\n * while (status.status === 'processing') {\n * await new Promise(r => 
setTimeout(r, 2000));\n * status = await client.getExtractStatus(id);\n * }\n * console.log(status.data);\n * ```\n */\n async startExtract(options: ExtractRequestOptions): Promise<ExtractResponse> {\n return startExtract(this.http, options);\n }\n\n /**\n * Get extract job status and data\n *\n * @param jobId - Extract job ID\n * @returns Current job status and extracted data\n */\n async getExtractStatus(jobId: string): Promise<ExtractResponse> {\n return getExtractStatus(this.http, jobId);\n }\n\n /**\n * Extract structured data from URLs using LLM and wait for completion\n *\n * @param options - Extract options including schema, prompt, and timeout\n * @returns Final extract result with structured data\n *\n * @example With Zod schema\n * ```typescript\n * import { z } from 'zod';\n *\n * const result = await client.extract({\n * urls: ['https://example.com/product'],\n * schema: z.object({\n * name: z.string(),\n * price: z.number(),\n * inStock: z.boolean(),\n * features: z.array(z.string())\n * }),\n * systemPrompt: 'Extract product information from the page',\n * provider: 'openai',\n * timeout: 60\n * });\n *\n * console.log(result.data);\n * ```\n *\n * @example With natural language prompt\n * ```typescript\n * const result = await client.extract({\n * urls: ['https://example.com/about'],\n * prompt: 'Extract the company name, founding year, and list of team members',\n * enableWebSearch: true\n * });\n *\n * console.log(result.data);\n * ```\n */\n async extract(options: ExtractRequestOptions): Promise<ExtractResponse> {\n return extract(this.http, options);\n }\n\n // ==========================================================================\n // Map Methods\n // ==========================================================================\n\n /**\n * Map a website to discover all URLs\n *\n * @param url - Root URL to map\n * @param options - Map options\n * @returns List of discovered URLs\n *\n * @example\n * ```typescript\n * const result = await client.map('https://example.com', {\n * engine: 'dynamic'\n * });\n *\n * console.log(`Found ${result.count} URLs:`);\n * result.links.forEach(url => console.log(url));\n * ```\n */\n async map(url: string, options?: MapOptions): Promise<MapResponse> {\n return map(this.http, url, options);\n }\n\n // ==========================================================================\n // Search Methods\n // ==========================================================================\n\n /**\n * Search the web and optionally scrape results\n *\n * @param query - Search query\n * @param options - Search options\n * @returns Search results with optional scraped content\n *\n * @example Basic search\n * ```typescript\n * const results = await client.search('best restaurants in NYC', {\n * limit: 10,\n * lang: 'en',\n * country: 'us'\n * });\n *\n * results.data.forEach(r => {\n * console.log(`${r.title}: ${r.url}`);\n * });\n * ```\n *\n * @example Search with scraping\n * ```typescript\n * const results = await client.search('best laptops 2024', {\n * limit: 5,\n * scrapeOptions: {\n * formats: ['markdown']\n * },\n * engine: 'smart'\n * });\n *\n * results.data.forEach(r => {\n * console.log(r.title);\n * console.log(r.markdown?.substring(0, 200));\n * });\n * ```\n *\n * @example Search with LLM extraction\n * ```typescript\n * import { z } from 'zod';\n *\n * const results = await client.search('iPhone 15 Pro reviews', {\n * limit: 5,\n * scrapeOptions: { formats: ['markdown'] },\n * extract: {\n * schema: z.object({\n * pros: 
z.array(z.string()),\n * cons: z.array(z.string()),\n * rating: z.number()\n * }),\n * systemPrompt: 'Extract review summary from the content'\n * }\n * });\n *\n * console.log(results.extract?.data);\n * ```\n */\n async search(query: string, options?: SearchOptions): Promise<SearchResponse> {\n return search(this.http, query, options);\n }\n\n // ==========================================================================\n // Usage & Monitoring Methods\n // ==========================================================================\n\n /**\n * Get current concurrency usage\n *\n * @returns Current and max concurrency\n *\n * @example\n * ```typescript\n * const { concurrency, maxConcurrency } = await client.getConcurrency();\n * console.log(`Using ${concurrency}/${maxConcurrency} concurrent requests`);\n * ```\n */\n async getConcurrency(): Promise<ConcurrencyInfo> {\n return getConcurrency(this.http);\n }\n\n /**\n * Get current credit usage\n *\n * @returns Credit usage information\n *\n * @example\n * ```typescript\n * const credits = await client.getCreditUsage();\n * console.log(`Remaining credits: ${credits.remainingCredits}`);\n * ```\n */\n async getCreditUsage(): Promise<CreditUsage> {\n return getCreditUsage(this.http);\n }\n\n /**\n * Get current token usage (for LLM extraction)\n *\n * @returns Token usage information\n *\n * @example\n * ```typescript\n * const tokens = await client.getTokenUsage();\n * console.log(`Remaining tokens: ${tokens.remainingTokens}`);\n * ```\n */\n async getTokenUsage(): Promise<TokenUsage> {\n return getTokenUsage(this.http);\n }\n\n /**\n * Get queue status information\n *\n * @returns Queue status metrics\n *\n * @example\n * ```typescript\n * const queue = await client.getQueueStatus();\n * console.log(`Jobs in queue: ${queue.jobsInQueue}`);\n * console.log(`Active: ${queue.activeJobsInQueue}, Waiting: ${queue.waitingJobsInQueue}`);\n * ```\n */\n async getQueueStatus(): Promise<QueueStatus> {\n return getQueueStatus(this.http);\n }\n}\n\nexport default 
CrawlGateClient;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,mBAIO;;;ACDA,IAAM,iBAAN,MAAM,wBAAuB,MAAM;AAAA;AAAA;AAAA;AAAA,EAIxB;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,EAEhB,YACE,SACA,YACA,MACA,SACA;AACA,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,UAAU;AAGf,QAAI,MAAM,mBAAmB;AAC3B,YAAM,kBAAkB,MAAM,eAAc;AAAA,IAC9C;AAAA,EACF;AACF;AAKO,IAAM,sBAAN,cAAkC,eAAe;AAAA,EACtD,YAAY,UAAkB,mBAAmB;AAC/C,UAAM,SAAS,KAAK,sBAAsB;AAC1C,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA,EAClD,YAAY,SAAiB,SAAmB;AAC9C,UAAM,SAAS,KAAK,oBAAoB,OAAO;AAC/C,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIlC;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,EAEhB,YAAY,OAAe,gBAAwB;AACjD;AAAA,MACE,aAAa,KAAK,4BAA4B,cAAc;AAAA,MAC5D;AAAA,MACA;AAAA,IACF;AACA,SAAK,OAAO;AACZ,SAAK,QAAQ;AACb,SAAK,iBAAiB;AAAA,EACxB;AACF;AAKO,IAAM,0BAAN,cAAsC,eAAe;AAAA,EAC1D,YAAY,UAAkB,mCAAmC;AAC/D,UAAM,SAAS,KAAK,qBAAqB;AACzC,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,iBAAN,cAA6B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIjC;AAAA,EAEhB,YAAY,UAAkB,uBAAuB,YAAqB;AACxE,UAAM,SAAS,KAAK,qBAAqB;AACzC,SAAK,OAAO;AACZ,SAAK,aAAa;AAAA,EACpB;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIlC;AAAA,EAEhB,YAAY,SAAiB,UAAmB;AAC9C,UAAM,SAAS,QAAW,kBAAkB;AAC5C,SAAK,OAAO;AACZ,SAAK,WAAW;AAAA,EAClB;AACF;AAKO,SAAS,cACd,QACA,MACO;AACP,QAAM,UAAU,KAAK,SAAS,KAAK,WAAW;AAE9C,UAAQ,QAAQ;AAAA,IACd,KAAK;AACH,YAAM,IAAI,gBAAgB,SAAS,KAAK,OAAO;AAAA,IACjD,KAAK;AACH,YAAM,IAAI,oBAAoB,OAAO;AAAA,IACvC,KAAK;AACH,YAAM,IAAI,eAAe,OAAO;AAAA,IAClC,KAAK;AAAA,IACL,KAAK;AACH,YAAM,IAAI,wBAAwB,OAAO;AAAA,IAC3C;AACE,YAAM,IAAI,eAAe,SAAS,QAAQ,QAAW,KAAK,OAAO;AAAA,EACrE;AACF;;;AD5GO,IAAM,aAAN,MAAiB;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,SAA4B;AACtC,SAAK,SAAS,QAAQ;AACtB,SAAK,SAAS,QAAQ,OAAO,QAAQ,OAAO,EAAE;AAC9C,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,gBAAgB,QAAQ,iBAAiB;AAE9C,SAAK,WAAW,aAAAA,QAAM,OAAO;AAAA,MAC3B,SAAS,KAAK;AAAA,MACd,SAAS,QAAQ,aAAa;AAAA,MAC9B,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,aAAa,KAAK;AAAA,MACpB;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,YAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,YAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKQ,MAAM,SAAgC;AAC5C,WAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,UAAU,GAAI,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA,EAKQ,iBAAiB,QAA0B;AACjD,WAAO,WAAW,OAAO,WAAW,OAAO,WAAW;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,QACZ,QAC2B;AAC3B,QAAI;AAEJ,aAAS,UAAU,GAAG,UAAU,KAAK,YAAY,WAAW;AAC1D,UAAI;AAEF,YACE,OAAO,UACP,CAAC,QAAQ,OAAO,OAAO,EAAE,SAAS,OAAO,OAAO,YAAY,CAAC,GAC7D;AACA,gBAAM,OAAQ,OAAO,QAAQ,CAAC;AAC9B,iBAAO,OAAO,EAAE,GAAG,MAAM,QAAQ,gBAAgB;AAGjD,cAAI,OAAO,KAAK,YAAY,UAAU;AACpC,mBAAO,UAAU,KAAK,UAAU;AAAA,UAClC;AAAA,QACF;AAEA,cAAM,WAAW,MAAM,KAAK,SAAS,QAAW,MAAM;AAGtD,YACE,KAAK,iBAAiB,SAAS,MAAM,KACrC,UAAU,KAAK,aAAa,GAC5B;AACA,gBAAM,KAAK,MAAM,KAAK,gBAAgB,KAAK,IAAI,GAAG,OAAO,CAAC;AAC1D;AAAA,QACF;AAEA,eAAO;AAAA,MACT,SAAS,KAAc;AACrB,cAAM,aAAa;AAKnB,oBACE,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,YAAY,OAAO,CAAC;AACpE,cAAM,SAAS,YAAY,UAAU;AAGrC,YAAI,KAAK,iBAAiB,MAAM,KAAK,UAAU,KAAK,aAAa,GAAG;AAClE,gBAAM,KAAK,MAAM,KAAK,gBAAgB,KAAK,IAAI,GAAG,OAAO,CAAC;AAC1D;AAAA,QACF;AAGA,YAAI,YAAY,UAAU;AACxB;AAAA,YACE,WAAW,SAAS;AAAA,YACpB,WAAW,SAAS;AAAA,UAKtB;AAAA,QACF;AAEA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,UAAM,aAAa,IAAI,eAAe,8BAA8B;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,KACJ,UACA,MACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL,MAAM;AAAA,MACN;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,IACJ,UACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MA
CL;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,OACJ,UACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL;AAAA,IACF,CAAC;AAAA,EACH;AACF;;;AE5MA,gCAAgC;AAQhC,SAAS,cACP,QACyB;AAEzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,eAAO,2CAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,oBAAoB,QAAW;AAC1C,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,SAAS,aAAa;AACxB,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,SAAS,YAAY,QAAW;AAClC,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,YAAY,QAAW;AAClC,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQ,cAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,KACA,SACmB;AACnB,QAAM,OAAO,gBAAgB,KAAK,OAAO;AAEzC,QAAM,WAAW,MAAM,KAAK,KAAgC,cAAc,IAAI;AAG9E,QAAM,OAAO,SAAS;AAGtB,MAAI,KAAK,YAAY,OAAO;AAC1B,UAAM,IAAI;AAAA,MACP,KAAK,SAAoB;AAAA,MAC1B;AAAA,MACC,KAAK,QAAmB;AAAA,IAC3B;AAAA,EACF;AAGA,MAAI,KAAK,YAAY,QAAQ,KAAK,MAAM;AACtC,WAAO,KAAK;AAAA,EACd;AAGA,MAAI,KAAK,OAAO,KAAK,YAAY,KAAK,MAAM;AAC1C,WAAO;AAAA,EACT;AAEA,QAAM,IAAI,eAAe,gCAAgC,QAAW,SAAS;AAC/E;;;ACjHA,SAAS,eACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,oBAAoB,QAAW;AAC1C,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,SAAS,aAAa;AACxB,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,WACpB,MACA,KACA,SACwB;AACxB,QAAM,OAAO,eAAe,KAAK,OAAO;AAExC,QAAM,WAAW,MAAM,KAAK,KAAoB,aAAa,IAAI;AAEjE,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,OAAO,SAAS,KAAK;AAAA,IACrB,QAAQ,SAAS,KAAK,UAAU;AAAA,IAChC,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;AASA,eAAsB,eACpB,MACA,OACmB;AACnB,QAAM,WAAW,MAAM,KAAK,IAAc,aAAa,KAAK,EAAE;AAE9D,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,IACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA,IACtC,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,YACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,aAAa,KAAK;AAAA,EACpB;AAEA,SAAO,SAAS,KAAK,YAAY;AACnC;AAKA,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAUA,eAAsB,MACpB,MACA,KACA,SACmB;AACnB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS,WAAW;AAGpC,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,WAAW,MAAM,KAAK,OAAO;AAEzD,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU;AAG5B,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,eAAe,MAAM,KAAK;AAG/C,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,KAAK,IAAI,IAAI,YAAY,WAAW;AACtC,YAAM,IAAI,gBAAgB,OAAO,OAAO;AAAA,IAC1C;AAGA,UAAM,MAAM,YAAY;AAAA,EAC1B;AACF;AASA,eAAsB,eACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,aAAa,KAAK,SAAS;AAE9B,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,S
AAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;;;AC9MA,SAAS,aACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,IACpB,MACA,KACA,SACsB;AACtB,QAAM,OAAO,aAAa,KAAK,OAAO;AAEtC,QAAM,WAAW,MAAM,KAAK,KAAkB,WAAW,IAAI;AAE7D,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,OAAO,SAAS,KAAK,SAAS,CAAC;AAAA,IAC/B,OAAO,SAAS,KAAK,SAAS,SAAS,KAAK,OAAO,UAAU;AAAA,IAC7D,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;;;AC1DA,IAAAC,6BAAgC;AAQhC,SAASC,eACP,QACyB;AACzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,eAAO,4CAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,OACA,SACyB;AACzB,QAAM,OAAgC,EAAE,MAAM;AAE9C,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,MAAM;AACjB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,eAAe;AAC1B,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQA,eAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,OACA,SACyB;AACzB,QAAM,OAAO,gBAAgB,OAAO,OAAO;AAE3C,QAAM,WAAW,MAAM,KAAK,KAAqB,cAAc,IAAI;AAEnE,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,cAAc,SAAS,KAAK;AAAA,IAC5B,YAAY,SAAS,KAAK;AAAA,IAC1B,SAAS,SAAS,KAAK;AAAA,EACzB;AACF;;;AC9FA,SAAS,eACP,MACA,SACyB;AACzB,QAAM,OAAgC,EAAE,KAAK;AAE7C,MAAI,SAAS,SAAS;AAEpB,UAAM,aAAa,QAAQ;AAC3B,QAAI,WAAW,OAAQ,MAAK,SAAS,WAAW;AAChD,QAAI,WAAW,QAAS,MAAK,UAAU,WAAW;AAClD,QAAI,WAAW,oBAAoB,OAAW,MAAK,kBAAkB,WAAW;AAChF,QAAI,WAAW,YAAa,MAAK,cAAc,WAAW;AAC1D,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,MAAO,MAAK,QAAQ,WAAW;AAAA,EAChD;AAEA,MAAI,SAAS,WAAW,MAAM;AAC5B,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,cAAc,MAAM;AAC/B,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,SAAS,qBAAqB,MAAM;AACtC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,SAAS,kBAAkB,MAAM;AACnC,SAAK,iBAAiB,QAAQ;AAAA,EAChC;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,iBACpB,MACA,MACA,SAC8B;AAC9B,MAAI,CAAC,MAAM,QAAQ,IAAI,KAAK,KAAK,WAAW,GAAG;AAC7C,UAAM,IAAI,eAAe,8BAA8B,KAAK,kBAAkB;AAAA,EAChF;AAEA,QAAM,OAAO,eAAe,MAAM,OAAO;AAEzC,QAAM,UAAkC,CAAC;AACzC,MAAI,SAAS,gBAAgB;AAC3B,YAAQ,iBAAiB,IAAI,QAAQ;AAAA,EACvC;AAEA,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B;AAAA,IACA;AAAA,IACA,OAAO,KAAK,OAAO,EAAE,SAAS,IAAI,UAAU;AAAA,EAC9C;AAEA,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,KAAK,SAAS,KAAK;AAAA,IACnB,aAAa,SAAS,KAAK;AAAA,EAC7B;AACF;AASA,eAAsB,qBACpB,MACA,OACyB;AACzB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,I
ACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA,IACtC,aAAa,SAAS,KAAK;AAAA,IAC3B,WAAW,SAAS,KAAK;AAAA,IACzB,MAAM,SAAS,KAAK,QAAQ;AAAA,IAC5B,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,kBACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO,SAAS,KAAK,WAAW,eAAe,SAAS,KAAK,YAAY;AAC3E;AASA,eAAsB,qBACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,oBAAoB,KAAK,SAAS;AAErC,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,SAAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;AAKA,SAASC,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,uBACb,MACA,OACA,eAAuB,KACvB,SACyB;AACzB,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,qBAAqB,MAAM,KAAK;AAGrD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AAUA,eAAsB,YACpB,MACA,MACA,SACyB;AACzB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS;AAGzB,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,iBAAiB,MAAM,MAAM,OAAO;AAGhE,SAAO,uBAAuB,MAAM,OAAO,cAAc,OAAO;AAClE;;;ACjQA,IAAAC,6BAAgC;AAQhC,SAAS,YAAY,OAAqC;AACxD,SACE,UAAU,QACV,OAAO,UAAU,YACjB,UAAU,UACT,OAAQ,MAAc,cAAc,cAAc,OAAQ,MAAc,UAAU;AAEvF;AAKA,SAASC,eACP,QACyB;AACzB,MAAI,YAAY,MAAM,GAAG;AACvB,eAAO,4CAAgB,MAAM;AAAA,EAC/B;AACA,SAAO;AACT;AAKA,SAAS,iBACP,SACyB;AACzB,QAAM,OAAgC,CAAC;AAEvC,MAAI,QAAQ,MAAM;AAChB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAASA,eAAc,QAAQ,MAAM;AAAA,EAC5C;AAEA,MAAI,QAAQ,gBAAgB,MAAM;AAChC,SAAK,eAAe,QAAQ;AAAA,EAC9B;AAEA,MAAI,QAAQ,sBAAsB,MAAM;AACtC,SAAK,qBAAqB,QAAQ;AAAA,EACpC;AAEA,MAAI,QAAQ,mBAAmB,MAAM;AACnC,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,QAAQ,eAAe,MAAM;AAC/B,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,QAAQ,qBAAqB,MAAM;AACrC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,QAAQ,UAAU;AACpB,SAAK,WAAW,QAAQ;AAAA,EAC1B;AAEA,MAAI,QAAQ,WAAW;AACrB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,QAAQ,eAAe;AACzB,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,SAAO;AACT;AASA,eAAsB,aACpB,MACA,SAC0B;AAC1B,QAAM,OAAO,iBAAiB,OAAO;AAErC,QAAM,WAAW,MAAM,KAAK,KAAsB,eAAe,IAAI;AAErE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AASA,eAAsB,iBACpB,MACA,OAC0B;AAC1B,QAAM,WAAW,MAAM,KAAK,IAAqB,eAAe,KAAK,EAAE;AAEvE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AAKA,SAASC,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,yBACb,MACA,OACA,eAAuB,KACvB,SAC0B;AAC1B,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,iBAAiB,MAAM,KAAK;AAGjD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AA8BA,eAAsB,Q
ACpB,MACA,SAC0B;AAC1B,QAAM,eAAe,QAAQ,gBAAgB;AAC7C,QAAM,UAAU,QAAQ;AAGxB,QAAM,UAAU,MAAM,aAAa,MAAM,OAAO;AAGhD,MAAI,CAAC,QAAQ,IAAI;AACf,WAAO;AAAA,EACT;AAGA,MAAI,QAAQ,WAAW,aAAa;AAClC,WAAO;AAAA,EACT;AAGA,SAAO,yBAAyB,MAAM,QAAQ,IAAI,cAAc,OAAO;AACzE;;;AC3OA,eAAsB,eAAe,MAA4C;AAC/E,QAAM,WAAW,MAAM,KAAK,IAAqB,iBAAiB;AAElE,SAAO;AAAA,IACL,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,EAClD;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,aAAa;AAE1D,SAAO;AAAA,IACL,kBAAkB,SAAS,KAAK,oBAAoB;AAAA,IACpD,aAAa,SAAS,KAAK;AAAA,IAC3B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,cAAc,MAAuC;AACzE,QAAM,WAAW,MAAM,KAAK,IAAgB,YAAY;AAExD,SAAO;AAAA,IACL,iBAAiB,SAAS,KAAK,mBAAmB;AAAA,IAClD,YAAY,SAAS,KAAK;AAAA,IAC1B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,WAAW;AAExD,SAAO;AAAA,IACL,SAAS,SAAS,KAAK,WAAW;AAAA,IAClC,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,mBAAmB,SAAS,KAAK,qBAAqB;AAAA,IACtD,oBAAoB,SAAS,KAAK,sBAAsB;AAAA,IACxD,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,IAChD,mBAAmB,SAAS,KAAK;AAAA,EACnC;AACF;;;ACYO,IAAM,kBAAN,MAAsB;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQjB,YAAY,UAAkC,CAAC,GAAG;AAChD,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI,qBAAqB;AAClE,UAAM,UACJ,QAAQ,UACR,QAAQ,IAAI,qBACZ,4BACA,QAAQ,OAAO,EAAE;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,OAAO,IAAI,WAAW;AAAA,MACzB;AAAA,MACA;AAAA,MACA,WAAW,QAAQ;AAAA,MACnB,YAAY,QAAQ;AAAA,MACpB,eAAe,QAAQ;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CA,MAAM,OAAO,KAAa,SAA4C;AACpE,WAAO,OAAO,KAAK,MAAM,KAAK,OAAO;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4BA,MAAM,iBACJ,MACA,SAC8B;AAC9B,WAAO,iBAAiB,KAAK,MAAM,MAAM,OAAO;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAAwC;AACjE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,kBAAkB,OAAiC;AACvD,WAAO,kBAAkB,KAAK,MAAM,KAAK;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAA6C;AACtE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,YACJ,MACA,SACyB;AACzB,WAAO,YAAY,KAAK,MAAM,MAAM,OAAO;AAAA,EAC7C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+BA,MAAM,WAAW,KAAa,SAAgD;AAC5E,WAAO,WAAW,KAAK,MAAM,KAAK,OAAO;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAAkC;AACrD,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,YAAY,OAAiC;AACjD,WAAO,YAAY,KAAK,MAAM,KAAK;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAA6C;AAChE,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,MAAM,KAAa,SAA2C;AAClE,WAAO,MAAM,KAAK,MAAM,KAAK,OAAO;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA6BA,MAAM,aAAa,SAA0D;AAC3E,WAAO,aAAa,KAAK,MAAM,OAAO;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBAAiB,OAAyC;AAC9D,WAAO,iBAAiB,KAAK,MAAM,KAAK;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAA
A;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuCA,MAAM,QAAQ,SAA0D;AACtE,WAAO,QAAQ,KAAK,MAAM,OAAO;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuBA,MAAM,IAAI,KAAa,SAA4C;AACjE,WAAO,IAAI,KAAK,MAAM,KAAK,OAAO;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,MAAM,OAAO,OAAe,SAAkD;AAC5E,WAAO,OAAO,KAAK,MAAM,OAAO,OAAO;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,iBAA2C;AAC/C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,gBAAqC;AACzC,WAAO,cAAc,KAAK,IAAI;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AACF;","names":["axios","import_zod_to_json_schema","convertSchema","sleep","import_zod_to_json_schema","convertSchema","sleep"]}
package/dist/index.js CHANGED
@@ -272,20 +272,21 @@ function buildScrapeBody(url, options) {
 async function scrape(http, url, options) {
   const body = buildScrapeBody(url, options);
   const response = await http.post("/v1/scrape", body);
-  if (!response.data.success) {
+  const data = response.data;
+  if (data.success === false) {
     throw new CrawlGateError(
-      response.data.error || "Scrape failed",
+      data.error || "Scrape failed",
       void 0,
-      "SCRAPE_ERROR"
+      data.code || "SCRAPE_ERROR"
     );
   }
-  if (!response.data.data) {
-    throw new CrawlGateError("No data returned from scrape", void 0, "NO_DATA");
+  if (data.success === true && data.data) {
+    return data.data;
   }
-  const document = {
-    ...response.data.data
-  };
-  return document;
+  if (data.url || data.markdown || data.html) {
+    return data;
+  }
+  throw new CrawlGateError("No data returned from scrape", void 0, "NO_DATA");
 }
 
 // src/methods/crawl.ts
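The hunk above changes what `scrape()` accepts from `/v1/scrape`: it now (1) forwards the server-supplied `code` on failure instead of always reporting `SCRAPE_ERROR`, (2) unwraps the `{ success: true, data }` envelope as before, and (3) additionally accepts a bare document whose `url`, `markdown`, or `html` field is set, throwing `NO_DATA` only when none of these shapes match. Below is a minimal standalone sketch of that acceptance rule, reimplemented outside the SDK for illustration; the `Doc` type and the `normalizeScrapePayload` helper are hypothetical, reduced to the fields the patched code actually checks.

```typescript
// Hypothetical document shape, limited to the fields the patch inspects.
interface Doc {
  url?: string;
  markdown?: string;
  html?: string;
}

type ScrapePayload =
  | { success: true; data?: Doc }                      // wrapped envelope
  | { success: false; error?: string; code?: string }  // failure envelope
  | Doc;                                               // bare top-level document, newly accepted

function normalizeScrapePayload(payload: ScrapePayload): Doc {
  // 1. Explicit failure envelope: surface the server's error and code.
  if ("success" in payload && payload.success === false) {
    throw new Error(
      `${payload.code ?? "SCRAPE_ERROR"}: ${payload.error ?? "Scrape failed"}`
    );
  }
  // 2. Success envelope with a nested document.
  if ("success" in payload && payload.success === true && payload.data) {
    return payload.data;
  }
  // 3. Bare document: accept it if any recognizable field is present.
  const bare = payload as Doc;
  if (bare.url || bare.markdown || bare.html) {
    return bare;
  }
  throw new Error("NO_DATA: No data returned from scrape");
}

// All of these normalize to a Doc instead of erroring:
console.log(normalizeScrapePayload({ success: true, data: { markdown: "# hi" } }));
console.log(normalizeScrapePayload({ url: "https://example.com", html: "<p>hi</p>" }));
```

In effect, a response that carries the document at the top level (no `data` wrapper) is now returned as-is rather than tripping the `NO_DATA` error that 1.0.0 raised.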
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/utils/httpClient.ts","../src/errors.ts","../src/methods/scrape.ts","../src/methods/crawl.ts","../src/methods/map.ts","../src/methods/search.ts","../src/methods/batch.ts","../src/methods/extract.ts","../src/methods/usage.ts","../src/client.ts"],"sourcesContent":["import axios, {\n type AxiosInstance,\n type AxiosRequestConfig,\n type AxiosResponse,\n} from \"axios\";\nimport { CrawlGateError, parseApiError } from \"../errors\";\n\n/**\n * HTTP Client configuration options\n */\nexport interface HttpClientOptions {\n /**\n * API key for authentication\n */\n apiKey: string;\n\n /**\n * Base URL for the API\n */\n apiUrl: string;\n\n /**\n * Request timeout in milliseconds\n */\n timeoutMs?: number;\n\n /**\n * Maximum number of retries\n */\n maxRetries?: number;\n\n /**\n * Backoff factor for retries (in seconds)\n */\n backoffFactor?: number;\n}\n\n/**\n * HTTP Client with retry logic and error handling\n */\nexport class HttpClient {\n private readonly instance: AxiosInstance;\n private readonly apiKey: string;\n private readonly apiUrl: string;\n private readonly maxRetries: number;\n private readonly backoffFactor: number;\n\n constructor(options: HttpClientOptions) {\n this.apiKey = options.apiKey;\n this.apiUrl = options.apiUrl.replace(/\\/$/, \"\");\n this.maxRetries = options.maxRetries ?? 3;\n this.backoffFactor = options.backoffFactor ?? 0.5;\n\n this.instance = axios.create({\n baseURL: this.apiUrl,\n timeout: options.timeoutMs ?? 90000,\n headers: {\n \"Content-Type\": \"application/json\",\n \"x-api-key\": this.apiKey,\n },\n });\n }\n\n /**\n * Get the configured API URL\n */\n getApiUrl(): string {\n return this.apiUrl;\n }\n\n /**\n * Get the configured API key\n */\n getApiKey(): string {\n return this.apiKey;\n }\n\n /**\n * Sleep for specified seconds\n */\n private sleep(seconds: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, seconds * 1000));\n }\n\n /**\n * Check if error is retryable\n */\n private isRetryableError(status?: number): boolean {\n return status === 502 || status === 503 || status === 429;\n }\n\n /**\n * Make HTTP request with retry logic\n */\n private async request<T = unknown>(\n config: AxiosRequestConfig\n ): Promise<AxiosResponse<T>> {\n let lastError: Error | undefined;\n\n for (let attempt = 0; attempt < this.maxRetries; attempt++) {\n try {\n // Add SDK origin to request body for tracking\n if (\n config.method &&\n [\"post\", \"put\", \"patch\"].includes(config.method.toLowerCase())\n ) {\n const data = (config.data ?? {}) as Record<string, unknown>;\n config.data = { ...data, origin: \"crawlgate-sdk\" };\n\n // If timeout is specified in body, extend request timeout\n if (typeof data.timeout === \"number\") {\n config.timeout = data.timeout + 5000;\n }\n }\n\n const response = await this.instance.request<T>(config);\n\n // Check for retryable status codes even on \"successful\" responses\n if (\n this.isRetryableError(response.status) &&\n attempt < this.maxRetries - 1\n ) {\n await this.sleep(this.backoffFactor * Math.pow(2, attempt));\n continue;\n }\n\n return response;\n } catch (err: unknown) {\n const axiosError = err as {\n response?: { status: number; data: unknown };\n message?: string;\n };\n\n lastError =\n err instanceof Error ? 
err : new Error(String(axiosError?.message));\n const status = axiosError?.response?.status;\n\n // Retry on retryable errors\n if (this.isRetryableError(status) && attempt < this.maxRetries - 1) {\n await this.sleep(this.backoffFactor * Math.pow(2, attempt));\n continue;\n }\n\n // Parse and throw appropriate error\n if (axiosError?.response) {\n parseApiError(\n axiosError.response.status,\n axiosError.response.data as {\n error?: string;\n message?: string;\n details?: unknown;\n }\n );\n }\n\n throw lastError;\n }\n }\n\n throw lastError ?? new CrawlGateError(\"Unexpected HTTP client error\");\n }\n\n /**\n * Make POST request\n */\n async post<T = unknown>(\n endpoint: string,\n body: Record<string, unknown>,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"post\",\n url: endpoint,\n data: body,\n headers,\n });\n }\n\n /**\n * Make GET request\n */\n async get<T = unknown>(\n endpoint: string,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"get\",\n url: endpoint,\n headers,\n });\n }\n\n /**\n * Make DELETE request\n */\n async delete<T = unknown>(\n endpoint: string,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"delete\",\n url: endpoint,\n headers,\n });\n }\n}\n","/**\n * Base error class for CrawlGate SDK errors\n */\nexport class CrawlGateError extends Error {\n /**\n * HTTP status code (if applicable)\n */\n public readonly statusCode?: number;\n\n /**\n * Error code for programmatic handling\n */\n public readonly code?: string;\n\n /**\n * Additional error details\n */\n public readonly details?: unknown;\n\n constructor(\n message: string,\n statusCode?: number,\n code?: string,\n details?: unknown\n ) {\n super(message);\n this.name = \"CrawlGateError\";\n this.statusCode = statusCode;\n this.code = code;\n this.details = details;\n\n // Maintains proper stack trace for where error was thrown\n if (Error.captureStackTrace) {\n Error.captureStackTrace(this, CrawlGateError);\n }\n }\n}\n\n/**\n * Error thrown when authentication fails\n */\nexport class AuthenticationError extends CrawlGateError {\n constructor(message: string = \"Invalid API Key\") {\n super(message, 401, \"AUTHENTICATION_ERROR\");\n this.name = \"AuthenticationError\";\n }\n}\n\n/**\n * Error thrown when request validation fails\n */\nexport class ValidationError extends CrawlGateError {\n constructor(message: string, details?: unknown) {\n super(message, 400, \"VALIDATION_ERROR\", details);\n this.name = \"ValidationError\";\n }\n}\n\n/**\n * Error thrown when a crawl job times out\n */\nexport class JobTimeoutError extends CrawlGateError {\n /**\n * Job ID that timed out\n */\n public readonly jobId: string;\n\n /**\n * Timeout duration in seconds\n */\n public readonly timeoutSeconds: number;\n\n constructor(jobId: string, timeoutSeconds: number) {\n super(\n `Crawl job ${jobId} did not complete within ${timeoutSeconds} seconds`,\n undefined,\n \"JOB_TIMEOUT\"\n );\n this.name = \"JobTimeoutError\";\n this.jobId = jobId;\n this.timeoutSeconds = timeoutSeconds;\n }\n}\n\n/**\n * Error thrown when upstream service is unavailable\n */\nexport class ServiceUnavailableError extends CrawlGateError {\n constructor(message: string = \"Service temporarily unavailable\") {\n super(message, 503, \"SERVICE_UNAVAILABLE\");\n this.name = \"ServiceUnavailableError\";\n }\n}\n\n/**\n * Error thrown when rate limit is exceeded\n */\nexport class 
RateLimitError extends CrawlGateError {\n /**\n * Time to wait before retrying (in seconds)\n */\n public readonly retryAfter?: number;\n\n constructor(message: string = \"Rate limit exceeded\", retryAfter?: number) {\n super(message, 429, \"RATE_LIMIT_EXCEEDED\");\n this.name = \"RateLimitError\";\n this.retryAfter = retryAfter;\n }\n}\n\n/**\n * Error thrown when LLM extraction fails\n */\nexport class ExtractionError extends CrawlGateError {\n /**\n * Provider that failed\n */\n public readonly provider?: string;\n\n constructor(message: string, provider?: string) {\n super(message, undefined, \"EXTRACTION_ERROR\");\n this.name = \"ExtractionError\";\n this.provider = provider;\n }\n}\n\n/**\n * Parse error response from API and throw appropriate error\n */\nexport function parseApiError(\n status: number,\n data: { error?: string; message?: string; details?: unknown }\n): never {\n const message = data.error || data.message || \"Unknown error\";\n\n switch (status) {\n case 400:\n throw new ValidationError(message, data.details);\n case 401:\n throw new AuthenticationError(message);\n case 429:\n throw new RateLimitError(message);\n case 502:\n case 503:\n throw new ServiceUnavailableError(message);\n default:\n throw new CrawlGateError(message, status, undefined, data.details);\n }\n}\n","import type { ZodTypeAny } from \"zod\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport type { HttpClient } from \"../utils/httpClient\";\nimport type { ScrapeOptions, ScrapeResponse, Document } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Convert Zod schema to JSON Schema if needed\n */\nfunction convertSchema(\n schema: Record<string, unknown> | ZodTypeAny\n): Record<string, unknown> {\n // Check if it's a Zod schema by looking for _def property\n if (schema && typeof schema === \"object\" && \"_def\" in schema) {\n return zodToJsonSchema(schema as ZodTypeAny) as Record<string, unknown>;\n }\n return schema as Record<string, unknown>;\n}\n\n/**\n * Build request body for scrape endpoint\n */\nfunction buildScrapeBody(\n url: string,\n options?: ScrapeOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.formats) {\n body.formats = options.formats;\n }\n\n if (options?.onlyMainContent !== undefined) {\n body.onlyMainContent = options.onlyMainContent;\n }\n\n if (options?.excludeTags) {\n body.excludeTags = options.excludeTags;\n }\n\n if (options?.waitFor !== undefined) {\n body.waitFor = options.waitFor;\n }\n\n if (options?.timeout !== undefined) {\n body.timeout = options.timeout;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n // Handle LLM extraction\n if (options?.extract) {\n body.extract = {\n schema: convertSchema(options.extract.schema),\n systemPrompt: options.extract.systemPrompt,\n provider: options.extract.provider,\n enableFallback: options.extract.enableFallback,\n };\n\n // Remove undefined values\n Object.keys(body.extract as Record<string, unknown>).forEach((key) => {\n if ((body.extract as Record<string, unknown>)[key] === undefined) {\n delete (body.extract as Record<string, unknown>)[key];\n }\n });\n }\n\n return body;\n}\n\n/**\n * Scrape a single URL\n *\n * @param http - HTTP client instance\n * @param url - URL to scrape\n * @param options - Scrape options\n * @returns Scraped document\n */\nexport async function 
scrape(\n http: HttpClient,\n url: string,\n options?: ScrapeOptions\n): Promise<Document> {\n const body = buildScrapeBody(url, options);\n\n const response = await http.post<ScrapeResponse>(\"/v1/scrape\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Scrape failed\",\n undefined,\n \"SCRAPE_ERROR\"\n );\n }\n\n if (!response.data.data) {\n throw new CrawlGateError(\"No data returned from scrape\", undefined, \"NO_DATA\");\n }\n\n // Add engine info to the document\n const document: Document = {\n ...response.data.data,\n };\n\n return document;\n}\n","import type { HttpClient } from \"../utils/httpClient\";\nimport type { CrawlOptions, CrawlResponse, CrawlJob, CrawlErrorsResponse } from \"../types\";\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\n\n/**\n * Build request body for crawl endpoint\n */\nfunction buildCrawlBody(\n url: string,\n options?: CrawlOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.limit !== undefined) {\n body.limit = options.limit;\n }\n\n if (options?.formats) {\n body.formats = options.formats;\n }\n\n if (options?.onlyMainContent !== undefined) {\n body.onlyMainContent = options.onlyMainContent;\n }\n\n if (options?.excludeTags) {\n body.excludeTags = options.excludeTags;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n return body;\n}\n\n/**\n * Start a crawl job (async)\n *\n * @param http - HTTP client instance\n * @param url - Root URL to crawl\n * @param options - Crawl options\n * @returns Crawl job ID and initial status\n */\nexport async function startCrawl(\n http: HttpClient,\n url: string,\n options?: CrawlOptions\n): Promise<CrawlResponse> {\n const body = buildCrawlBody(url, options);\n\n const response = await http.post<CrawlResponse>(\"/v1/crawl\", body);\n\n if (!response.data.success && !response.data.id) {\n throw new CrawlGateError(\n \"Failed to start crawl job\",\n undefined,\n \"CRAWL_START_ERROR\"\n );\n }\n\n return {\n success: true,\n id: response.data.id,\n jobId: response.data.id,\n status: response.data.status || \"scraping\",\n engine: response.data.engine,\n };\n}\n\n/**\n * Get crawl job status\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns Current job status and data\n */\nexport async function getCrawlStatus(\n http: HttpClient,\n jobId: string\n): Promise<CrawlJob> {\n const response = await http.get<CrawlJob>(`/v1/crawl/${jobId}`);\n\n return {\n id: response.data.id || jobId,\n status: response.data.status,\n total: response.data.total || 0,\n completed: response.data.completed || 0,\n data: response.data.data || [],\n engine: response.data.engine,\n error: response.data.error,\n };\n}\n\n/**\n * Cancel a crawl job\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns True if cancelled successfully\n */\nexport async function cancelCrawl(\n http: HttpClient,\n jobId: string\n): Promise<boolean> {\n const response = await http.delete<{ success?: boolean; message?: string }>(\n `/v1/crawl/${jobId}`\n );\n\n return response.data.success !== false;\n}\n\n/**\n * Sleep for specified milliseconds\n */\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\n/**\n * Crawl a website and wait for completion (waiter pattern)\n *\n * 
@param http - HTTP client instance\n * @param url - Root URL to crawl\n * @param options - Crawl options including pollInterval and timeout\n * @returns Final crawl job with all data\n */\nexport async function crawl(\n http: HttpClient,\n url: string,\n options?: CrawlOptions\n): Promise<CrawlJob> {\n const pollInterval = options?.pollInterval ?? 2000; // 2 seconds default\n const timeout = options?.timeout ?? 300; // 5 minutes default (in seconds)\n\n // Start the crawl job\n const { id: jobId } = await startCrawl(http, url, options);\n\n const startTime = Date.now();\n const timeoutMs = timeout * 1000;\n\n // Poll until completion or timeout\n while (true) {\n const status = await getCrawlStatus(http, jobId);\n\n // Check for terminal states\n if (status.status === \"completed\") {\n return status;\n }\n\n if (status.status === \"failed\") {\n throw new CrawlGateError(\n status.error || \"Crawl job failed\",\n undefined,\n \"CRAWL_FAILED\"\n );\n }\n\n if (status.status === \"cancelled\") {\n throw new CrawlGateError(\n \"Crawl job was cancelled\",\n undefined,\n \"CRAWL_CANCELLED\"\n );\n }\n\n // Check for timeout\n if (Date.now() - startTime > timeoutMs) {\n throw new JobTimeoutError(jobId, timeout);\n }\n\n // Wait before next poll\n await sleep(pollInterval);\n }\n}\n\n/**\n * Get crawl job errors and robots.txt blocks\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns Errors and robots.txt blocked URLs\n */\nexport async function getCrawlErrors(\n http: HttpClient,\n jobId: string\n): Promise<CrawlErrorsResponse> {\n const response = await http.get<{\n success?: boolean;\n data?: { errors: Array<Record<string, string>>; robotsBlocked: string[] };\n errors?: Array<Record<string, string>>;\n robotsBlocked?: string[];\n }>(`/v1/crawl/${jobId}/errors`);\n\n const payload = response.data.data ?? 
response.data;\n return {\n errors: (payload.errors || []).map((e) => ({\n id: e.id || \"\",\n timestamp: e.timestamp,\n url: e.url || \"\",\n code: e.code,\n error: e.error || e.message || \"Unknown error\",\n })),\n robotsBlocked: payload.robotsBlocked || [],\n };\n}\n","import type { HttpClient } from \"../utils/httpClient\";\nimport type { MapOptions, MapResponse } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Build request body for map endpoint\n */\nfunction buildMapBody(\n url: string,\n options?: MapOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n return body;\n}\n\n/**\n * Map a website to discover all URLs\n *\n * @param http - HTTP client instance\n * @param url - Root URL to map\n * @param options - Map options\n * @returns List of discovered URLs\n */\nexport async function map(\n http: HttpClient,\n url: string,\n options?: MapOptions\n): Promise<MapResponse> {\n const body = buildMapBody(url, options);\n\n const response = await http.post<MapResponse>(\"/v1/map\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Map failed\",\n undefined,\n \"MAP_ERROR\"\n );\n }\n\n return {\n success: true,\n links: response.data.links || [],\n count: response.data.count || response.data.links?.length || 0,\n engine: response.data.engine,\n };\n}\n","import type { ZodTypeAny } from \"zod\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport type { HttpClient } from \"../utils/httpClient\";\nimport type { SearchOptions, SearchResponse } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Convert Zod schema to JSON Schema if needed\n */\nfunction convertSchema(\n schema: Record<string, unknown> | ZodTypeAny\n): Record<string, unknown> {\n if (schema && typeof schema === \"object\" && \"_def\" in schema) {\n return zodToJsonSchema(schema as ZodTypeAny) as Record<string, unknown>;\n }\n return schema as Record<string, unknown>;\n}\n\n/**\n * Build request body for search endpoint\n */\nfunction buildSearchBody(\n query: string,\n options?: SearchOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { query };\n\n if (options?.limit !== undefined) {\n body.limit = options.limit;\n }\n\n if (options?.lang) {\n body.lang = options.lang;\n }\n\n if (options?.country) {\n body.country = options.country;\n }\n\n if (options?.engines) {\n body.engines = options.engines;\n }\n\n if (options?.scrapeOptions) {\n body.scrapeOptions = options.scrapeOptions;\n }\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n // Handle LLM extraction\n if (options?.extract) {\n body.extract = {\n schema: convertSchema(options.extract.schema),\n systemPrompt: options.extract.systemPrompt,\n provider: options.extract.provider,\n enableFallback: options.extract.enableFallback,\n };\n\n // Remove undefined values\n Object.keys(body.extract as Record<string, unknown>).forEach((key) => {\n if ((body.extract as Record<string, unknown>)[key] === undefined) {\n delete (body.extract as Record<string, unknown>)[key];\n }\n });\n }\n\n return body;\n}\n\n/**\n * Search the web and optionally scrape results\n *\n * @param http - HTTP client instance\n * @param query - 
Search query\n * @param options - Search options\n * @returns Search results with optional scraped content\n */\nexport async function search(\n http: HttpClient,\n query: string,\n options?: SearchOptions\n): Promise<SearchResponse> {\n const body = buildSearchBody(query, options);\n\n const response = await http.post<SearchResponse>(\"/v1/search\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Search failed\",\n undefined,\n \"SEARCH_ERROR\"\n );\n }\n\n return {\n success: true,\n data: response.data.data || [],\n query: response.data.query || query,\n totalResults: response.data.totalResults,\n searchTime: response.data.searchTime,\n extract: response.data.extract,\n };\n}\n","import type { HttpClient } from \"../utils/httpClient\";\r\nimport type {\r\n BatchScrapeOptions,\r\n BatchScrapeResponse,\r\n BatchScrapeJob,\r\n CrawlErrorsResponse,\r\n Document,\r\n ScrapeOptions,\r\n} from \"../types\";\r\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\r\n\r\n/**\r\n * Build request body for batch scrape endpoint\r\n */\r\nfunction buildBatchBody(\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Record<string, unknown> {\r\n const body: Record<string, unknown> = { urls };\r\n\r\n if (options?.options) {\r\n // Spread scrape options into body\r\n const scrapeOpts = options.options;\r\n if (scrapeOpts.engine) body.engine = scrapeOpts.engine;\r\n if (scrapeOpts.formats) body.formats = scrapeOpts.formats;\r\n if (scrapeOpts.onlyMainContent !== undefined) body.onlyMainContent = scrapeOpts.onlyMainContent;\r\n if (scrapeOpts.excludeTags) body.excludeTags = scrapeOpts.excludeTags;\r\n if (scrapeOpts.waitFor !== undefined) body.waitFor = scrapeOpts.waitFor;\r\n if (scrapeOpts.timeout !== undefined) body.timeout = scrapeOpts.timeout;\r\n if (scrapeOpts.proxy) body.proxy = scrapeOpts.proxy;\r\n }\r\n\r\n if (options?.webhook != null) {\r\n body.webhook = options.webhook;\r\n }\r\n\r\n if (options?.appendToId != null) {\r\n body.appendToId = options.appendToId;\r\n }\r\n\r\n if (options?.ignoreInvalidURLs != null) {\r\n body.ignoreInvalidURLs = options.ignoreInvalidURLs;\r\n }\r\n\r\n if (options?.maxConcurrency != null) {\r\n body.maxConcurrency = options.maxConcurrency;\r\n }\r\n\r\n if (options?.projectId) {\r\n body.project_id = options.projectId;\r\n }\r\n\r\n return body;\r\n}\r\n\r\n/**\r\n * Start a batch scrape job (async)\r\n *\r\n * @param http - HTTP client instance\r\n * @param urls - Array of URLs to scrape\r\n * @param options - Batch scrape options\r\n * @returns Batch job ID and initial status\r\n */\r\nexport async function startBatchScrape(\r\n http: HttpClient,\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Promise<BatchScrapeResponse> {\r\n if (!Array.isArray(urls) || urls.length === 0) {\r\n throw new CrawlGateError(\"URLs array cannot be empty\", 400, \"VALIDATION_ERROR\");\r\n }\r\n\r\n const body = buildBatchBody(urls, options);\r\n\r\n const headers: Record<string, string> = {};\r\n if (options?.idempotencyKey) {\r\n headers[\"Idempotency-Key\"] = options.idempotencyKey;\r\n }\r\n\r\n const response = await http.post<BatchScrapeResponse>(\r\n \"/v1/batch/scrape\",\r\n body,\r\n Object.keys(headers).length > 0 ? 
headers : undefined\r\n );\r\n\r\n if (!response.data.success && !response.data.id) {\r\n throw new CrawlGateError(\r\n response.data.error || \"Failed to start batch scrape job\",\r\n undefined,\r\n \"BATCH_START_ERROR\"\r\n );\r\n }\r\n\r\n return {\r\n success: true,\r\n id: response.data.id,\r\n url: response.data.url,\r\n invalidURLs: response.data.invalidURLs,\r\n };\r\n}\r\n\r\n/**\r\n * Get batch scrape job status\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns Current job status and data\r\n */\r\nexport async function getBatchScrapeStatus(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<BatchScrapeJob> {\r\n const response = await http.get<BatchScrapeJob & { success?: boolean }>(\r\n `/v1/batch/scrape/${jobId}`\r\n );\r\n\r\n return {\r\n id: response.data.id || jobId,\r\n status: response.data.status,\r\n total: response.data.total || 0,\r\n completed: response.data.completed || 0,\r\n creditsUsed: response.data.creditsUsed,\r\n expiresAt: response.data.expiresAt,\r\n next: response.data.next ?? null,\r\n data: response.data.data || [],\r\n error: response.data.error,\r\n };\r\n}\r\n\r\n/**\r\n * Cancel a batch scrape job\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns True if cancelled successfully\r\n */\r\nexport async function cancelBatchScrape(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<boolean> {\r\n const response = await http.delete<{ success?: boolean; status?: string }>(\r\n `/v1/batch/scrape/${jobId}`\r\n );\r\n\r\n return response.data.status === \"cancelled\" || response.data.success !== false;\r\n}\r\n\r\n/**\r\n * Get batch scrape errors\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns Errors and robots.txt blocks\r\n */\r\nexport async function getBatchScrapeErrors(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<CrawlErrorsResponse> {\r\n const response = await http.get<{\r\n success?: boolean;\r\n data?: { errors: Array<Record<string, string>>; robotsBlocked: string[] };\r\n errors?: Array<Record<string, string>>;\r\n robotsBlocked?: string[];\r\n }>(`/v1/batch/scrape/${jobId}/errors`);\r\n\r\n const payload = response.data.data ?? response.data;\r\n return {\r\n errors: (payload.errors || []).map((e) => ({\r\n id: e.id || \"\",\r\n timestamp: e.timestamp,\r\n url: e.url || \"\",\r\n code: e.code,\r\n error: e.error || e.message || \"Unknown error\",\r\n })),\r\n robotsBlocked: payload.robotsBlocked || [],\r\n };\r\n}\r\n\r\n/**\r\n * Sleep for specified milliseconds\r\n */\r\nfunction sleep(ms: number): Promise<void> {\r\n return new Promise((resolve) => setTimeout(resolve, ms));\r\n}\r\n\r\n/**\r\n * Wait for batch scrape job completion\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @param pollInterval - Poll interval in milliseconds\r\n * @param timeout - Maximum wait time in seconds\r\n * @returns Final job status with all data\r\n */\r\nasync function waitForBatchCompletion(\r\n http: HttpClient,\r\n jobId: string,\r\n pollInterval: number = 2000,\r\n timeout?: number\r\n): Promise<BatchScrapeJob> {\r\n const startTime = Date.now();\r\n const timeoutMs = timeout ? 
timeout * 1000 : undefined;\r\n\r\n while (true) {\r\n const status = await getBatchScrapeStatus(http, jobId);\r\n\r\n // Check for terminal states\r\n if (status.status === \"completed\") {\r\n return status;\r\n }\r\n\r\n if (status.status === \"failed\") {\r\n throw new CrawlGateError(\r\n status.error || \"Batch scrape job failed\",\r\n undefined,\r\n \"BATCH_FAILED\"\r\n );\r\n }\r\n\r\n if (status.status === \"cancelled\") {\r\n throw new CrawlGateError(\r\n \"Batch scrape job was cancelled\",\r\n undefined,\r\n \"BATCH_CANCELLED\"\r\n );\r\n }\r\n\r\n // Check for timeout\r\n if (timeoutMs && Date.now() - startTime > timeoutMs) {\r\n throw new JobTimeoutError(jobId, timeout!);\r\n }\r\n\r\n // Wait before next poll\r\n await sleep(Math.max(1000, pollInterval));\r\n }\r\n}\r\n\r\n/**\r\n * Batch scrape multiple URLs and wait for completion (waiter pattern)\r\n *\r\n * @param http - HTTP client instance\r\n * @param urls - Array of URLs to scrape\r\n * @param options - Batch scrape options including pollInterval and timeout\r\n * @returns Final job with all scraped data\r\n */\r\nexport async function batchScrape(\r\n http: HttpClient,\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Promise<BatchScrapeJob> {\r\n const pollInterval = options?.pollInterval ?? 2000;\r\n const timeout = options?.timeout;\r\n\r\n // Start the batch job\r\n const { id: jobId } = await startBatchScrape(http, urls, options);\r\n\r\n // Wait for completion\r\n return waitForBatchCompletion(http, jobId, pollInterval, timeout);\r\n}\r\n\r\n/**\r\n * Split URLs into chunks for large batch operations\r\n *\r\n * @param urls - Array of URLs\r\n * @param chunkSize - Maximum URLs per chunk (default: 100)\r\n * @returns Array of URL chunks\r\n */\r\nexport function chunkUrls(urls: string[], chunkSize: number = 100): string[][] {\r\n const chunks: string[][] = [];\r\n for (let i = 0; i < urls.length; i += chunkSize) {\r\n chunks.push(urls.slice(i, i + chunkSize));\r\n }\r\n return chunks;\r\n}\r\n","import type { ZodTypeAny } from \"zod\";\r\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\r\nimport type { HttpClient } from \"../utils/httpClient\";\r\nimport type { ExtractRequestOptions, ExtractResponse, ScrapeOptions } from \"../types\";\r\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\r\n\r\n/**\r\n * Check if value is a Zod schema\r\n */\r\nfunction isZodSchema(value: unknown): value is ZodTypeAny {\r\n return (\r\n value !== null &&\r\n typeof value === \"object\" &&\r\n \"_def\" in value &&\r\n (typeof (value as any).safeParse === \"function\" || typeof (value as any).parse === \"function\")\r\n );\r\n}\r\n\r\n/**\r\n * Convert Zod schema to JSON Schema if needed\r\n */\r\nfunction convertSchema(\r\n schema: Record<string, unknown> | ZodTypeAny\r\n): Record<string, unknown> {\r\n if (isZodSchema(schema)) {\r\n return zodToJsonSchema(schema) as Record<string, unknown>;\r\n }\r\n return schema as Record<string, unknown>;\r\n}\r\n\r\n/**\r\n * Build request body for extract endpoint\r\n */\r\nfunction buildExtractBody(\r\n options: ExtractRequestOptions\r\n): Record<string, unknown> {\r\n const body: Record<string, unknown> = {};\r\n\r\n if (options.urls) {\r\n body.urls = options.urls;\r\n }\r\n\r\n if (options.prompt != null) {\r\n body.prompt = options.prompt;\r\n }\r\n\r\n if (options.schema != null) {\r\n body.schema = convertSchema(options.schema);\r\n }\r\n\r\n if (options.systemPrompt != null) {\r\n body.systemPrompt = options.systemPrompt;\r\n }\r\n\r\n if 
(options.allowExternalLinks != null) {\r\n body.allowExternalLinks = options.allowExternalLinks;\r\n }\r\n\r\n if (options.enableWebSearch != null) {\r\n body.enableWebSearch = options.enableWebSearch;\r\n }\r\n\r\n if (options.showSources != null) {\r\n body.showSources = options.showSources;\r\n }\r\n\r\n if (options.ignoreInvalidURLs != null) {\r\n body.ignoreInvalidURLs = options.ignoreInvalidURLs;\r\n }\r\n\r\n if (options.provider) {\r\n body.provider = options.provider;\r\n }\r\n\r\n if (options.projectId) {\r\n body.project_id = options.projectId;\r\n }\r\n\r\n if (options.scrapeOptions) {\r\n body.scrapeOptions = options.scrapeOptions;\r\n }\r\n\r\n return body;\r\n}\r\n\r\n/**\r\n * Start an extract job (async)\r\n *\r\n * @param http - HTTP client instance\r\n * @param options - Extract request options\r\n * @returns Extract job ID or immediate result\r\n */\r\nexport async function startExtract(\r\n http: HttpClient,\r\n options: ExtractRequestOptions\r\n): Promise<ExtractResponse> {\r\n const body = buildExtractBody(options);\r\n\r\n const response = await http.post<ExtractResponse>(\"/v1/extract\", body);\r\n\r\n if (response.data.success === false && response.data.error) {\r\n throw new CrawlGateError(\r\n response.data.error,\r\n undefined,\r\n \"EXTRACT_ERROR\"\r\n );\r\n }\r\n\r\n return response.data;\r\n}\r\n\r\n/**\r\n * Get extract job status\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Extract job ID\r\n * @returns Current job status and data\r\n */\r\nexport async function getExtractStatus(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<ExtractResponse> {\r\n const response = await http.get<ExtractResponse>(`/v1/extract/${jobId}`);\r\n\r\n if (response.data.success === false && response.data.error) {\r\n throw new CrawlGateError(\r\n response.data.error,\r\n undefined,\r\n \"EXTRACT_STATUS_ERROR\"\r\n );\r\n }\r\n\r\n return response.data;\r\n}\r\n\r\n/**\r\n * Sleep for specified milliseconds\r\n */\r\nfunction sleep(ms: number): Promise<void> {\r\n return new Promise((resolve) => setTimeout(resolve, ms));\r\n}\r\n\r\n/**\r\n * Wait for extract job completion\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Extract job ID\r\n * @param pollInterval - Poll interval in milliseconds\r\n * @param timeout - Maximum wait time in seconds\r\n * @returns Final extract result\r\n */\r\nasync function waitForExtractCompletion(\r\n http: HttpClient,\r\n jobId: string,\r\n pollInterval: number = 2000,\r\n timeout?: number\r\n): Promise<ExtractResponse> {\r\n const startTime = Date.now();\r\n const timeoutMs = timeout ? 
timeout * 1000 : undefined;\r\n\r\n while (true) {\r\n const status = await getExtractStatus(http, jobId);\r\n\r\n // Check for terminal states\r\n if (status.status === \"completed\") {\r\n return status;\r\n }\r\n\r\n if (status.status === \"failed\") {\r\n throw new CrawlGateError(\r\n status.error || \"Extract job failed\",\r\n undefined,\r\n \"EXTRACT_FAILED\"\r\n );\r\n }\r\n\r\n if (status.status === \"cancelled\") {\r\n throw new CrawlGateError(\r\n \"Extract job was cancelled\",\r\n undefined,\r\n \"EXTRACT_CANCELLED\"\r\n );\r\n }\r\n\r\n // Check for timeout\r\n if (timeoutMs && Date.now() - startTime > timeoutMs) {\r\n throw new JobTimeoutError(jobId, timeout!);\r\n }\r\n\r\n // Wait before next poll\r\n await sleep(Math.max(1000, pollInterval));\r\n }\r\n}\r\n\r\n/**\r\n * Extract structured data from URLs using LLM (waiter pattern)\r\n *\r\n * This method starts an extract job and polls until completion.\r\n * For synchronous extracts (small payloads), it may return immediately.\r\n *\r\n * @param http - HTTP client instance\r\n * @param options - Extract request options including pollInterval and timeout\r\n * @returns Final extract result with structured data\r\n *\r\n * @example\r\n * ```typescript\r\n * import { z } from 'zod';\r\n *\r\n * const result = await extract(http, {\r\n * urls: ['https://example.com/product'],\r\n * schema: z.object({\r\n * name: z.string(),\r\n * price: z.number(),\r\n * inStock: z.boolean()\r\n * }),\r\n * systemPrompt: 'Extract product information',\r\n * provider: 'openai'\r\n * });\r\n *\r\n * console.log(result.data);\r\n * ```\r\n */\r\nexport async function extract(\r\n http: HttpClient,\r\n options: ExtractRequestOptions\r\n): Promise<ExtractResponse> {\r\n const pollInterval = options.pollInterval ?? 2000;\r\n const timeout = options.timeout;\r\n\r\n // Start the extract job\r\n const started = await startExtract(http, options);\r\n\r\n // If no job ID, it was a synchronous response\r\n if (!started.id) {\r\n return started;\r\n }\r\n\r\n // If already completed, return immediately\r\n if (started.status === \"completed\") {\r\n return started;\r\n }\r\n\r\n // Wait for completion\r\n return waitForExtractCompletion(http, started.id, pollInterval, timeout);\r\n}\r\n","import type { HttpClient } from \"../utils/httpClient\";\r\nimport type { ConcurrencyInfo, CreditUsage, TokenUsage, QueueStatus } from \"../types\";\r\n\r\n/**\r\n * Get current concurrency usage\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Current and max concurrency\r\n */\r\nexport async function getConcurrency(http: HttpClient): Promise<ConcurrencyInfo> {\r\n const response = await http.get<ConcurrencyInfo>(\"/v1/concurrency\");\r\n\r\n return {\r\n concurrency: response.data.concurrency ?? 0,\r\n maxConcurrency: response.data.maxConcurrency ?? 0,\r\n };\r\n}\r\n\r\n/**\r\n * Get current credit usage\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Credit usage information\r\n */\r\nexport async function getCreditUsage(http: HttpClient): Promise<CreditUsage> {\r\n const response = await http.get<CreditUsage>(\"/v1/credits\");\r\n\r\n return {\r\n remainingCredits: response.data.remainingCredits ?? 
0,\r\n planCredits: response.data.planCredits,\r\n billingPeriodStart: response.data.billingPeriodStart,\r\n billingPeriodEnd: response.data.billingPeriodEnd,\r\n };\r\n}\r\n\r\n/**\r\n * Get current token usage (for LLM extraction)\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Token usage information\r\n */\r\nexport async function getTokenUsage(http: HttpClient): Promise<TokenUsage> {\r\n const response = await http.get<TokenUsage>(\"/v1/tokens\");\r\n\r\n return {\r\n remainingTokens: response.data.remainingTokens ?? 0,\r\n planTokens: response.data.planTokens,\r\n billingPeriodStart: response.data.billingPeriodStart,\r\n billingPeriodEnd: response.data.billingPeriodEnd,\r\n };\r\n}\r\n\r\n/**\r\n * Get queue status information\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Queue status metrics\r\n */\r\nexport async function getQueueStatus(http: HttpClient): Promise<QueueStatus> {\r\n const response = await http.get<QueueStatus>(\"/v1/queue\");\r\n\r\n return {\r\n success: response.data.success ?? true,\r\n jobsInQueue: response.data.jobsInQueue ?? 0,\r\n activeJobsInQueue: response.data.activeJobsInQueue ?? 0,\r\n waitingJobsInQueue: response.data.waitingJobsInQueue ?? 0,\r\n maxConcurrency: response.data.maxConcurrency ?? 0,\r\n mostRecentSuccess: response.data.mostRecentSuccess,\r\n };\r\n}\r\n","import { HttpClient } from \"./utils/httpClient\";\nimport { scrape } from \"./methods/scrape\";\nimport { startCrawl, getCrawlStatus, cancelCrawl, crawl, getCrawlErrors } from \"./methods/crawl\";\nimport { map } from \"./methods/map\";\nimport { search } from \"./methods/search\";\nimport {\n startBatchScrape,\n getBatchScrapeStatus,\n cancelBatchScrape,\n batchScrape,\n getBatchScrapeErrors,\n} from \"./methods/batch\";\nimport { startExtract, getExtractStatus, extract } from \"./methods/extract\";\nimport { getConcurrency, getCreditUsage, getTokenUsage, getQueueStatus } from \"./methods/usage\";\nimport type {\n CrawlGateClientOptions,\n ScrapeOptions,\n CrawlOptions,\n MapOptions,\n SearchOptions,\n Document,\n CrawlResponse,\n CrawlJob,\n CrawlErrorsResponse,\n MapResponse,\n SearchResponse,\n BatchScrapeOptions,\n BatchScrapeResponse,\n BatchScrapeJob,\n ExtractRequestOptions,\n ExtractResponse,\n ConcurrencyInfo,\n CreditUsage,\n TokenUsage,\n QueueStatus,\n} from \"./types\";\nimport { CrawlGateError } from \"./errors\";\n\n/**\n * CrawlGate SDK Client\n *\n * @example\n * ```typescript\n * import { CrawlGateClient } from '@crawlgate/sdk';\n *\n * const client = new CrawlGateClient({\n * apiKey: 'sk_live_...',\n * apiUrl: 'https://api.crawlgate.io'\n * });\n *\n * // Scrape a single URL\n * const doc = await client.scrape('https://example.com', {\n * engine: 'smart',\n * formats: ['markdown', 'html']\n * });\n *\n * // Batch scrape multiple URLs\n * const job = await client.batchScrape(['https://a.com', 'https://b.com'], {\n * options: { formats: ['markdown'] }\n * });\n *\n * // Crawl a website\n * const crawlJob = await client.crawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic'\n * });\n *\n * // Extract structured data with LLM\n * const extracted = await client.extract({\n * urls: ['https://example.com/product'],\n * schema: { name: 'string', price: 'number' },\n * provider: 'openai'\n * });\n *\n * // Search the web\n * const results = await client.search('best restaurants', {\n * limit: 5,\n * scrapeOptions: { formats: ['markdown'] }\n * });\n * ```\n */\nexport class CrawlGateClient {\n private readonly http: HttpClient;\n\n 
/**\n * Create a new CrawlGate client\n *\n * @param options - Client configuration options\n * @throws {CrawlGateError} If API key is not provided\n */\n constructor(options: CrawlGateClientOptions = {}) {\n const apiKey = options.apiKey ?? process.env.CRAWLGATE_API_KEY ?? \"\";\n const apiUrl = (\n options.apiUrl ??\n process.env.CRAWLGATE_API_URL ??\n \"https://api.crawlgate.io\"\n ).replace(/\\/$/, \"\");\n\n if (!apiKey) {\n throw new CrawlGateError(\n \"API key is required. Set CRAWLGATE_API_KEY env variable or pass apiKey option.\",\n undefined,\n \"MISSING_API_KEY\"\n );\n }\n\n this.http = new HttpClient({\n apiKey,\n apiUrl,\n timeoutMs: options.timeoutMs,\n maxRetries: options.maxRetries,\n backoffFactor: options.backoffFactor,\n });\n }\n\n // ==========================================================================\n // Scrape Methods\n // ==========================================================================\n\n /**\n * Scrape a single URL\n *\n * @param url - URL to scrape\n * @param options - Scrape options\n * @returns Scraped document with requested formats\n *\n * @example\n * ```typescript\n * const doc = await client.scrape('https://example.com', {\n * engine: 'smart',\n * formats: ['markdown', 'html'],\n * onlyMainContent: true\n * });\n * console.log(doc.markdown);\n * ```\n *\n * @example With LLM extraction\n * ```typescript\n * import { z } from 'zod';\n *\n * const schema = z.object({\n * title: z.string(),\n * price: z.number(),\n * inStock: z.boolean()\n * });\n *\n * const doc = await client.scrape('https://example.com/product', {\n * engine: 'smart',\n * extract: {\n * schema,\n * systemPrompt: 'Extract product details',\n * provider: 'openai'\n * }\n * });\n * console.log(doc.extract?.data);\n * ```\n */\n async scrape(url: string, options?: ScrapeOptions): Promise<Document> {\n return scrape(this.http, url, options);\n }\n\n // ==========================================================================\n // Batch Scrape Methods\n // ==========================================================================\n\n /**\n * Start a batch scrape job (async)\n *\n * @param urls - Array of URLs to scrape\n * @param options - Batch scrape options\n * @returns Batch job ID and initial status\n *\n * @example\n * ```typescript\n * const { id } = await client.startBatchScrape(\n * ['https://a.com', 'https://b.com', 'https://c.com'],\n * { options: { formats: ['markdown'] } }\n * );\n *\n * // Poll manually\n * let status = await client.getBatchScrapeStatus(id);\n * while (status.status === 'scraping') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getBatchScrapeStatus(id);\n * }\n * ```\n */\n async startBatchScrape(\n urls: string[],\n options?: BatchScrapeOptions\n ): Promise<BatchScrapeResponse> {\n return startBatchScrape(this.http, urls, options);\n }\n\n /**\n * Get batch scrape job status and data\n *\n * @param jobId - Batch job ID\n * @returns Current job status and scraped data\n */\n async getBatchScrapeStatus(jobId: string): Promise<BatchScrapeJob> {\n return getBatchScrapeStatus(this.http, jobId);\n }\n\n /**\n * Cancel a batch scrape job\n *\n * @param jobId - Batch job ID\n * @returns True if cancelled successfully\n */\n async cancelBatchScrape(jobId: string): Promise<boolean> {\n return cancelBatchScrape(this.http, jobId);\n }\n\n /**\n * Get batch scrape job errors\n *\n * @param jobId - Batch job ID\n * @returns Errors and robots.txt blocked URLs\n */\n async getBatchScrapeErrors(jobId: string): 
Promise<CrawlErrorsResponse> {\n return getBatchScrapeErrors(this.http, jobId);\n }\n\n /**\n * Batch scrape multiple URLs and wait for completion\n *\n * @param urls - Array of URLs to scrape\n * @param options - Batch options including pollInterval and timeout\n * @returns Final job with all scraped data\n *\n * @example\n * ```typescript\n * const job = await client.batchScrape(\n * ['https://a.com', 'https://b.com', 'https://c.com'],\n * {\n * options: { formats: ['markdown'], engine: 'smart' },\n * pollInterval: 2000,\n * timeout: 300\n * }\n * );\n *\n * console.log(`Scraped ${job.completed} URLs`);\n * job.data.forEach(doc => console.log(doc.url, doc.markdown?.length));\n * ```\n */\n async batchScrape(\n urls: string[],\n options?: BatchScrapeOptions\n ): Promise<BatchScrapeJob> {\n return batchScrape(this.http, urls, options);\n }\n\n // ==========================================================================\n // Crawl Methods\n // ==========================================================================\n\n /**\n * Start a crawl job (async)\n *\n * Use this method when you want to start a crawl and manage polling yourself.\n * For automatic polling, use the `crawl()` method instead.\n *\n * @param url - Root URL to crawl\n * @param options - Crawl options\n * @returns Crawl job ID and initial status\n *\n * @example\n * ```typescript\n * const { id } = await client.startCrawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic'\n * });\n *\n * // Poll for status manually\n * let status = await client.getCrawlStatus(id);\n * while (status.status === 'scraping') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getCrawlStatus(id);\n * }\n * ```\n */\n async startCrawl(url: string, options?: CrawlOptions): Promise<CrawlResponse> {\n return startCrawl(this.http, url, options);\n }\n\n /**\n * Get crawl job status and data\n *\n * @param jobId - Crawl job ID\n * @returns Current job status and scraped data\n */\n async getCrawlStatus(jobId: string): Promise<CrawlJob> {\n return getCrawlStatus(this.http, jobId);\n }\n\n /**\n * Cancel a crawl job\n *\n * @param jobId - Crawl job ID\n * @returns True if cancelled successfully\n */\n async cancelCrawl(jobId: string): Promise<boolean> {\n return cancelCrawl(this.http, jobId);\n }\n\n /**\n * Get crawl job errors and robots.txt blocks\n *\n * @param jobId - Crawl job ID\n * @returns Errors and robots.txt blocked URLs\n */\n async getCrawlErrors(jobId: string): Promise<CrawlErrorsResponse> {\n return getCrawlErrors(this.http, jobId);\n }\n\n /**\n * Crawl a website and wait for completion\n *\n * This method starts a crawl job and automatically polls until completion.\n *\n * @param url - Root URL to crawl\n * @param options - Crawl options including pollInterval and timeout\n * @returns Final crawl job with all scraped data\n *\n * @example\n * ```typescript\n * const job = await client.crawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic',\n * formats: ['markdown'],\n * pollInterval: 2000, // Poll every 2 seconds\n * timeout: 300 // 5 minute timeout\n * });\n *\n * console.log(`Crawled ${job.completed} pages`);\n * job.data.forEach(doc => console.log(doc.url));\n * ```\n */\n async crawl(url: string, options?: CrawlOptions): Promise<CrawlJob> {\n return crawl(this.http, url, options);\n }\n\n // ==========================================================================\n // Extract Methods (Standalone LLM Extraction)\n // 
==========================================================================\n\n /**\n * Start an extract job (async)\n *\n * @param options - Extract request options\n * @returns Extract job ID or immediate result\n *\n * @example\n * ```typescript\n * const { id } = await client.startExtract({\n * urls: ['https://example.com/product'],\n * schema: { name: 'string', price: 'number' },\n * provider: 'openai'\n * });\n *\n * // Poll manually\n * let status = await client.getExtractStatus(id);\n * while (status.status === 'processing') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getExtractStatus(id);\n * }\n * console.log(status.data);\n * ```\n */\n async startExtract(options: ExtractRequestOptions): Promise<ExtractResponse> {\n return startExtract(this.http, options);\n }\n\n /**\n * Get extract job status and data\n *\n * @param jobId - Extract job ID\n * @returns Current job status and extracted data\n */\n async getExtractStatus(jobId: string): Promise<ExtractResponse> {\n return getExtractStatus(this.http, jobId);\n }\n\n /**\n * Extract structured data from URLs using LLM and wait for completion\n *\n * @param options - Extract options including schema, prompt, and timeout\n * @returns Final extract result with structured data\n *\n * @example With Zod schema\n * ```typescript\n * import { z } from 'zod';\n *\n * const result = await client.extract({\n * urls: ['https://example.com/product'],\n * schema: z.object({\n * name: z.string(),\n * price: z.number(),\n * inStock: z.boolean(),\n * features: z.array(z.string())\n * }),\n * systemPrompt: 'Extract product information from the page',\n * provider: 'openai',\n * timeout: 60\n * });\n *\n * console.log(result.data);\n * ```\n *\n * @example With natural language prompt\n * ```typescript\n * const result = await client.extract({\n * urls: ['https://example.com/about'],\n * prompt: 'Extract the company name, founding year, and list of team members',\n * enableWebSearch: true\n * });\n *\n * console.log(result.data);\n * ```\n */\n async extract(options: ExtractRequestOptions): Promise<ExtractResponse> {\n return extract(this.http, options);\n }\n\n // ==========================================================================\n // Map Methods\n // ==========================================================================\n\n /**\n * Map a website to discover all URLs\n *\n * @param url - Root URL to map\n * @param options - Map options\n * @returns List of discovered URLs\n *\n * @example\n * ```typescript\n * const result = await client.map('https://example.com', {\n * engine: 'dynamic'\n * });\n *\n * console.log(`Found ${result.count} URLs:`);\n * result.links.forEach(url => console.log(url));\n * ```\n */\n async map(url: string, options?: MapOptions): Promise<MapResponse> {\n return map(this.http, url, options);\n }\n\n // ==========================================================================\n // Search Methods\n // ==========================================================================\n\n /**\n * Search the web and optionally scrape results\n *\n * @param query - Search query\n * @param options - Search options\n * @returns Search results with optional scraped content\n *\n * @example Basic search\n * ```typescript\n * const results = await client.search('best restaurants in NYC', {\n * limit: 10,\n * lang: 'en',\n * country: 'us'\n * });\n *\n * results.data.forEach(r => {\n * console.log(`${r.title}: ${r.url}`);\n * });\n * ```\n *\n * @example Search with scraping\n * ```typescript\n * 
const results = await client.search('best laptops 2024', {\n * limit: 5,\n * scrapeOptions: {\n * formats: ['markdown']\n * },\n * engine: 'smart'\n * });\n *\n * results.data.forEach(r => {\n * console.log(r.title);\n * console.log(r.markdown?.substring(0, 200));\n * });\n * ```\n *\n * @example Search with LLM extraction\n * ```typescript\n * import { z } from 'zod';\n *\n * const results = await client.search('iPhone 15 Pro reviews', {\n * limit: 5,\n * scrapeOptions: { formats: ['markdown'] },\n * extract: {\n * schema: z.object({\n * pros: z.array(z.string()),\n * cons: z.array(z.string()),\n * rating: z.number()\n * }),\n * systemPrompt: 'Extract review summary from the content'\n * }\n * });\n *\n * console.log(results.extract?.data);\n * ```\n */\n async search(query: string, options?: SearchOptions): Promise<SearchResponse> {\n return search(this.http, query, options);\n }\n\n // ==========================================================================\n // Usage & Monitoring Methods\n // ==========================================================================\n\n /**\n * Get current concurrency usage\n *\n * @returns Current and max concurrency\n *\n * @example\n * ```typescript\n * const { concurrency, maxConcurrency } = await client.getConcurrency();\n * console.log(`Using ${concurrency}/${maxConcurrency} concurrent requests`);\n * ```\n */\n async getConcurrency(): Promise<ConcurrencyInfo> {\n return getConcurrency(this.http);\n }\n\n /**\n * Get current credit usage\n *\n * @returns Credit usage information\n *\n * @example\n * ```typescript\n * const credits = await client.getCreditUsage();\n * console.log(`Remaining credits: ${credits.remainingCredits}`);\n * ```\n */\n async getCreditUsage(): Promise<CreditUsage> {\n return getCreditUsage(this.http);\n }\n\n /**\n * Get current token usage (for LLM extraction)\n *\n * @returns Token usage information\n *\n * @example\n * ```typescript\n * const tokens = await client.getTokenUsage();\n * console.log(`Remaining tokens: ${tokens.remainingTokens}`);\n * ```\n */\n async getTokenUsage(): Promise<TokenUsage> {\n return getTokenUsage(this.http);\n }\n\n /**\n * Get queue status information\n *\n * @returns Queue status metrics\n *\n * @example\n * ```typescript\n * const queue = await client.getQueueStatus();\n * console.log(`Jobs in queue: ${queue.jobsInQueue}`);\n * console.log(`Active: ${queue.activeJobsInQueue}, Waiting: ${queue.waitingJobsInQueue}`);\n * ```\n */\n async getQueueStatus(): Promise<QueueStatus> {\n return getQueueStatus(this.http);\n }\n}\n\nexport default 
CrawlGateClient;\n"],"mappings":";AAAA,OAAO,WAIA;;;ACDA,IAAM,iBAAN,MAAM,wBAAuB,MAAM;AAAA;AAAA;AAAA;AAAA,EAIxB;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,EAEhB,YACE,SACA,YACA,MACA,SACA;AACA,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,UAAU;AAGf,QAAI,MAAM,mBAAmB;AAC3B,YAAM,kBAAkB,MAAM,eAAc;AAAA,IAC9C;AAAA,EACF;AACF;AAKO,IAAM,sBAAN,cAAkC,eAAe;AAAA,EACtD,YAAY,UAAkB,mBAAmB;AAC/C,UAAM,SAAS,KAAK,sBAAsB;AAC1C,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA,EAClD,YAAY,SAAiB,SAAmB;AAC9C,UAAM,SAAS,KAAK,oBAAoB,OAAO;AAC/C,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIlC;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,EAEhB,YAAY,OAAe,gBAAwB;AACjD;AAAA,MACE,aAAa,KAAK,4BAA4B,cAAc;AAAA,MAC5D;AAAA,MACA;AAAA,IACF;AACA,SAAK,OAAO;AACZ,SAAK,QAAQ;AACb,SAAK,iBAAiB;AAAA,EACxB;AACF;AAKO,IAAM,0BAAN,cAAsC,eAAe;AAAA,EAC1D,YAAY,UAAkB,mCAAmC;AAC/D,UAAM,SAAS,KAAK,qBAAqB;AACzC,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,iBAAN,cAA6B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIjC;AAAA,EAEhB,YAAY,UAAkB,uBAAuB,YAAqB;AACxE,UAAM,SAAS,KAAK,qBAAqB;AACzC,SAAK,OAAO;AACZ,SAAK,aAAa;AAAA,EACpB;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIlC;AAAA,EAEhB,YAAY,SAAiB,UAAmB;AAC9C,UAAM,SAAS,QAAW,kBAAkB;AAC5C,SAAK,OAAO;AACZ,SAAK,WAAW;AAAA,EAClB;AACF;AAKO,SAAS,cACd,QACA,MACO;AACP,QAAM,UAAU,KAAK,SAAS,KAAK,WAAW;AAE9C,UAAQ,QAAQ;AAAA,IACd,KAAK;AACH,YAAM,IAAI,gBAAgB,SAAS,KAAK,OAAO;AAAA,IACjD,KAAK;AACH,YAAM,IAAI,oBAAoB,OAAO;AAAA,IACvC,KAAK;AACH,YAAM,IAAI,eAAe,OAAO;AAAA,IAClC,KAAK;AAAA,IACL,KAAK;AACH,YAAM,IAAI,wBAAwB,OAAO;AAAA,IAC3C;AACE,YAAM,IAAI,eAAe,SAAS,QAAQ,QAAW,KAAK,OAAO;AAAA,EACrE;AACF;;;AD5GO,IAAM,aAAN,MAAiB;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,SAA4B;AACtC,SAAK,SAAS,QAAQ;AACtB,SAAK,SAAS,QAAQ,OAAO,QAAQ,OAAO,EAAE;AAC9C,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,gBAAgB,QAAQ,iBAAiB;AAE9C,SAAK,WAAW,MAAM,OAAO;AAAA,MAC3B,SAAS,KAAK;AAAA,MACd,SAAS,QAAQ,aAAa;AAAA,MAC9B,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,aAAa,KAAK;AAAA,MACpB;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,YAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,YAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKQ,MAAM,SAAgC;AAC5C,WAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,UAAU,GAAI,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA,EAKQ,iBAAiB,QAA0B;AACjD,WAAO,WAAW,OAAO,WAAW,OAAO,WAAW;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,QACZ,QAC2B;AAC3B,QAAI;AAEJ,aAAS,UAAU,GAAG,UAAU,KAAK,YAAY,WAAW;AAC1D,UAAI;AAEF,YACE,OAAO,UACP,CAAC,QAAQ,OAAO,OAAO,EAAE,SAAS,OAAO,OAAO,YAAY,CAAC,GAC7D;AACA,gBAAM,OAAQ,OAAO,QAAQ,CAAC;AAC9B,iBAAO,OAAO,EAAE,GAAG,MAAM,QAAQ,gBAAgB;AAGjD,cAAI,OAAO,KAAK,YAAY,UAAU;AACpC,mBAAO,UAAU,KAAK,UAAU;AAAA,UAClC;AAAA,QACF;AAEA,cAAM,WAAW,MAAM,KAAK,SAAS,QAAW,MAAM;AAGtD,YACE,KAAK,iBAAiB,SAAS,MAAM,KACrC,UAAU,KAAK,aAAa,GAC5B;AACA,gBAAM,KAAK,MAAM,KAAK,gBAAgB,KAAK,IAAI,GAAG,OAAO,CAAC;AAC1D;AAAA,QACF;AAEA,eAAO;AAAA,MACT,SAAS,KAAc;AACrB,cAAM,aAAa;AAKnB,oBACE,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,YAAY,OAAO,CAAC;AACpE,cAAM,SAAS,YAAY,UAAU;AAGrC,YAAI,KAAK,iBAAiB,MAAM,KAAK,UAAU,KAAK,aAAa,GAAG;AAClE,gBAAM,KAAK,MAAM,KAAK,gBAAgB,KAAK,IAAI,GAAG,OAAO,CAAC;AAC1D;AAAA,QACF;AAGA,YAAI,YAAY,UAAU;AACxB;AAAA,YACE,WAAW,SAAS;AAAA,YACpB,WAAW,SAAS;AAAA,UAKtB;AAAA,QACF;AAEA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,UAAM,aAAa,IAAI,eAAe,8BAA8B;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,KACJ,UACA,MACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL,MAAM;AAAA,MACN;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,IACJ,UACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,OACJ,UACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAA
A,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL;AAAA,IACF,CAAC;AAAA,EACH;AACF;;;AE5MA,SAAS,uBAAuB;AAQhC,SAAS,cACP,QACyB;AAEzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,WAAO,gBAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,oBAAoB,QAAW;AAC1C,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,SAAS,aAAa;AACxB,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,SAAS,YAAY,QAAW;AAClC,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,YAAY,QAAW;AAClC,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQ,cAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,KACA,SACmB;AACnB,QAAM,OAAO,gBAAgB,KAAK,OAAO;AAEzC,QAAM,WAAW,MAAM,KAAK,KAAqB,cAAc,IAAI;AAEnE,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,MAAI,CAAC,SAAS,KAAK,MAAM;AACvB,UAAM,IAAI,eAAe,gCAAgC,QAAW,SAAS;AAAA,EAC/E;AAGA,QAAM,WAAqB;AAAA,IACzB,GAAG,SAAS,KAAK;AAAA,EACnB;AAEA,SAAO;AACT;;;AC5GA,SAAS,eACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,oBAAoB,QAAW;AAC1C,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,SAAS,aAAa;AACxB,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,WACpB,MACA,KACA,SACwB;AACxB,QAAM,OAAO,eAAe,KAAK,OAAO;AAExC,QAAM,WAAW,MAAM,KAAK,KAAoB,aAAa,IAAI;AAEjE,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,OAAO,SAAS,KAAK;AAAA,IACrB,QAAQ,SAAS,KAAK,UAAU;AAAA,IAChC,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;AASA,eAAsB,eACpB,MACA,OACmB;AACnB,QAAM,WAAW,MAAM,KAAK,IAAc,aAAa,KAAK,EAAE;AAE9D,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,IACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA,IACtC,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,YACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,aAAa,KAAK;AAAA,EACpB;AAEA,SAAO,SAAS,KAAK,YAAY;AACnC;AAKA,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAUA,eAAsB,MACpB,MACA,KACA,SACmB;AACnB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS,WAAW;AAGpC,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,WAAW,MAAM,KAAK,OAAO;AAEzD,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU;AAG5B,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,eAAe,MAAM,KAAK;AAG/C,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,KAAK,IAAI,IAAI,YAAY,WAAW;AACtC,YAAM,IAAI,gBAAgB,OAAO,OAAO;AAAA,IAC1C;AAGA,UAAM,MAAM,YAAY;AAAA,EAC1B;AACF;AASA,eAAsB,eACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,aAAa,KAAK,SAAS;AAE9B,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,SAAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,
MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;;;AC9MA,SAAS,aACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,IACpB,MACA,KACA,SACsB;AACtB,QAAM,OAAO,aAAa,KAAK,OAAO;AAEtC,QAAM,WAAW,MAAM,KAAK,KAAkB,WAAW,IAAI;AAE7D,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,OAAO,SAAS,KAAK,SAAS,CAAC;AAAA,IAC/B,OAAO,SAAS,KAAK,SAAS,SAAS,KAAK,OAAO,UAAU;AAAA,IAC7D,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;;;AC1DA,SAAS,mBAAAA,wBAAuB;AAQhC,SAASC,eACP,QACyB;AACzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,WAAOC,iBAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,OACA,SACyB;AACzB,QAAM,OAAgC,EAAE,MAAM;AAE9C,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,MAAM;AACjB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,eAAe;AAC1B,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQD,eAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,OACA,SACyB;AACzB,QAAM,OAAO,gBAAgB,OAAO,OAAO;AAE3C,QAAM,WAAW,MAAM,KAAK,KAAqB,cAAc,IAAI;AAEnE,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,cAAc,SAAS,KAAK;AAAA,IAC5B,YAAY,SAAS,KAAK;AAAA,IAC1B,SAAS,SAAS,KAAK;AAAA,EACzB;AACF;;;AC9FA,SAAS,eACP,MACA,SACyB;AACzB,QAAM,OAAgC,EAAE,KAAK;AAE7C,MAAI,SAAS,SAAS;AAEpB,UAAM,aAAa,QAAQ;AAC3B,QAAI,WAAW,OAAQ,MAAK,SAAS,WAAW;AAChD,QAAI,WAAW,QAAS,MAAK,UAAU,WAAW;AAClD,QAAI,WAAW,oBAAoB,OAAW,MAAK,kBAAkB,WAAW;AAChF,QAAI,WAAW,YAAa,MAAK,cAAc,WAAW;AAC1D,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,MAAO,MAAK,QAAQ,WAAW;AAAA,EAChD;AAEA,MAAI,SAAS,WAAW,MAAM;AAC5B,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,cAAc,MAAM;AAC/B,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,SAAS,qBAAqB,MAAM;AACtC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,SAAS,kBAAkB,MAAM;AACnC,SAAK,iBAAiB,QAAQ;AAAA,EAChC;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,iBACpB,MACA,MACA,SAC8B;AAC9B,MAAI,CAAC,MAAM,QAAQ,IAAI,KAAK,KAAK,WAAW,GAAG;AAC7C,UAAM,IAAI,eAAe,8BAA8B,KAAK,kBAAkB;AAAA,EAChF;AAEA,QAAM,OAAO,eAAe,MAAM,OAAO;AAEzC,QAAM,UAAkC,CAAC;AACzC,MAAI,SAAS,gBAAgB;AAC3B,YAAQ,iBAAiB,IAAI,QAAQ;AAAA,EACvC;AAEA,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B;AAAA,IACA;AAAA,IACA,OAAO,KAAK,OAAO,EAAE,SAAS,IAAI,UAAU;AAAA,EAC9C;AAEA,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,KAAK,SAAS,KAAK;AAAA,IACnB,aAAa,SAAS,KAAK;AAAA,EAC7B;AACF;AASA,eAAsB,qBACpB,MACA,OACyB;AACzB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,IACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA,IACtC,aAAa,SAAS,KAAK;AAAA,IAC3B,WAAW,SAAS
,KAAK;AAAA,IACzB,MAAM,SAAS,KAAK,QAAQ;AAAA,IAC5B,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,kBACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO,SAAS,KAAK,WAAW,eAAe,SAAS,KAAK,YAAY;AAC3E;AASA,eAAsB,qBACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,oBAAoB,KAAK,SAAS;AAErC,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,SAAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;AAKA,SAASE,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,uBACb,MACA,OACA,eAAuB,KACvB,SACyB;AACzB,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,qBAAqB,MAAM,KAAK;AAGrD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AAUA,eAAsB,YACpB,MACA,MACA,SACyB;AACzB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS;AAGzB,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,iBAAiB,MAAM,MAAM,OAAO;AAGhE,SAAO,uBAAuB,MAAM,OAAO,cAAc,OAAO;AAClE;;;ACjQA,SAAS,mBAAAC,wBAAuB;AAQhC,SAAS,YAAY,OAAqC;AACxD,SACE,UAAU,QACV,OAAO,UAAU,YACjB,UAAU,UACT,OAAQ,MAAc,cAAc,cAAc,OAAQ,MAAc,UAAU;AAEvF;AAKA,SAASC,eACP,QACyB;AACzB,MAAI,YAAY,MAAM,GAAG;AACvB,WAAOC,iBAAgB,MAAM;AAAA,EAC/B;AACA,SAAO;AACT;AAKA,SAAS,iBACP,SACyB;AACzB,QAAM,OAAgC,CAAC;AAEvC,MAAI,QAAQ,MAAM;AAChB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAASD,eAAc,QAAQ,MAAM;AAAA,EAC5C;AAEA,MAAI,QAAQ,gBAAgB,MAAM;AAChC,SAAK,eAAe,QAAQ;AAAA,EAC9B;AAEA,MAAI,QAAQ,sBAAsB,MAAM;AACtC,SAAK,qBAAqB,QAAQ;AAAA,EACpC;AAEA,MAAI,QAAQ,mBAAmB,MAAM;AACnC,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,QAAQ,eAAe,MAAM;AAC/B,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,QAAQ,qBAAqB,MAAM;AACrC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,QAAQ,UAAU;AACpB,SAAK,WAAW,QAAQ;AAAA,EAC1B;AAEA,MAAI,QAAQ,WAAW;AACrB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,QAAQ,eAAe;AACzB,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,SAAO;AACT;AASA,eAAsB,aACpB,MACA,SAC0B;AAC1B,QAAM,OAAO,iBAAiB,OAAO;AAErC,QAAM,WAAW,MAAM,KAAK,KAAsB,eAAe,IAAI;AAErE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AASA,eAAsB,iBACpB,MACA,OAC0B;AAC1B,QAAM,WAAW,MAAM,KAAK,IAAqB,eAAe,KAAK,EAAE;AAEvE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AAKA,SAASE,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,yBACb,MACA,OACA,eAAuB,KACvB,SAC0B;AAC1B,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,iBAAiB,MAAM,KAAK;AAGjD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AA8BA,eAAsB,QACpB,MACA,SAC0B;AAC1B,QAAM,eAAe,QAAQ,gBAAgB;AAC7C,QAAM,UAAU,QAAQ;AAGxB,QAAM,UAAU,MAAM,aAAa,MAAM,OAAO;AAGhD,MAAI,CAAC,QAAQ
,IAAI;AACf,WAAO;AAAA,EACT;AAGA,MAAI,QAAQ,WAAW,aAAa;AAClC,WAAO;AAAA,EACT;AAGA,SAAO,yBAAyB,MAAM,QAAQ,IAAI,cAAc,OAAO;AACzE;;;AC3OA,eAAsB,eAAe,MAA4C;AAC/E,QAAM,WAAW,MAAM,KAAK,IAAqB,iBAAiB;AAElE,SAAO;AAAA,IACL,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,EAClD;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,aAAa;AAE1D,SAAO;AAAA,IACL,kBAAkB,SAAS,KAAK,oBAAoB;AAAA,IACpD,aAAa,SAAS,KAAK;AAAA,IAC3B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,cAAc,MAAuC;AACzE,QAAM,WAAW,MAAM,KAAK,IAAgB,YAAY;AAExD,SAAO;AAAA,IACL,iBAAiB,SAAS,KAAK,mBAAmB;AAAA,IAClD,YAAY,SAAS,KAAK;AAAA,IAC1B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,WAAW;AAExD,SAAO;AAAA,IACL,SAAS,SAAS,KAAK,WAAW;AAAA,IAClC,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,mBAAmB,SAAS,KAAK,qBAAqB;AAAA,IACtD,oBAAoB,SAAS,KAAK,sBAAsB;AAAA,IACxD,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,IAChD,mBAAmB,SAAS,KAAK;AAAA,EACnC;AACF;;;ACYO,IAAM,kBAAN,MAAsB;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQjB,YAAY,UAAkC,CAAC,GAAG;AAChD,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI,qBAAqB;AAClE,UAAM,UACJ,QAAQ,UACR,QAAQ,IAAI,qBACZ,4BACA,QAAQ,OAAO,EAAE;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,OAAO,IAAI,WAAW;AAAA,MACzB;AAAA,MACA;AAAA,MACA,WAAW,QAAQ;AAAA,MACnB,YAAY,QAAQ;AAAA,MACpB,eAAe,QAAQ;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CA,MAAM,OAAO,KAAa,SAA4C;AACpE,WAAO,OAAO,KAAK,MAAM,KAAK,OAAO;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4BA,MAAM,iBACJ,MACA,SAC8B;AAC9B,WAAO,iBAAiB,KAAK,MAAM,MAAM,OAAO;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAAwC;AACjE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,kBAAkB,OAAiC;AACvD,WAAO,kBAAkB,KAAK,MAAM,KAAK;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAA6C;AACtE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,YACJ,MACA,SACyB;AACzB,WAAO,YAAY,KAAK,MAAM,MAAM,OAAO;AAAA,EAC7C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+BA,MAAM,WAAW,KAAa,SAAgD;AAC5E,WAAO,WAAW,KAAK,MAAM,KAAK,OAAO;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAAkC;AACrD,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,YAAY,OAAiC;AACjD,WAAO,YAAY,KAAK,MAAM,KAAK;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAA6C;AAChE,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,MAAM,KAAa,SAA2C;AAClE,WAAO,MAAM,KAAK,MAAM,KAAK,OAAO;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA6BA,MAAM,aAAa,SAA0D;AAC3E,WAAO,aAAa,KAAK,MAAM,OAAO;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBAAiB,OAAyC;AAC9D,WAAO,iBAAiB,KAAK,MAAM,KAAK;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA
;AAAA;AAAA,EAuCA,MAAM,QAAQ,SAA0D;AACtE,WAAO,QAAQ,KAAK,MAAM,OAAO;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuBA,MAAM,IAAI,KAAa,SAA4C;AACjE,WAAO,IAAI,KAAK,MAAM,KAAK,OAAO;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,MAAM,OAAO,OAAe,SAAkD;AAC5E,WAAO,OAAO,KAAK,MAAM,OAAO,OAAO;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,iBAA2C;AAC/C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,gBAAqC;AACzC,WAAO,cAAc,KAAK,IAAI;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AACF;","names":["zodToJsonSchema","convertSchema","zodToJsonSchema","sleep","zodToJsonSchema","convertSchema","zodToJsonSchema","sleep"]}
1
+ {"version":3,"sources":["../src/utils/httpClient.ts","../src/errors.ts","../src/methods/scrape.ts","../src/methods/crawl.ts","../src/methods/map.ts","../src/methods/search.ts","../src/methods/batch.ts","../src/methods/extract.ts","../src/methods/usage.ts","../src/client.ts"],"sourcesContent":["import axios, {\n type AxiosInstance,\n type AxiosRequestConfig,\n type AxiosResponse,\n} from \"axios\";\nimport { CrawlGateError, parseApiError } from \"../errors\";\n\n/**\n * HTTP Client configuration options\n */\nexport interface HttpClientOptions {\n /**\n * API key for authentication\n */\n apiKey: string;\n\n /**\n * Base URL for the API\n */\n apiUrl: string;\n\n /**\n * Request timeout in milliseconds\n */\n timeoutMs?: number;\n\n /**\n * Maximum number of retries\n */\n maxRetries?: number;\n\n /**\n * Backoff factor for retries (in seconds)\n */\n backoffFactor?: number;\n}\n\n/**\n * HTTP Client with retry logic and error handling\n */\nexport class HttpClient {\n private readonly instance: AxiosInstance;\n private readonly apiKey: string;\n private readonly apiUrl: string;\n private readonly maxRetries: number;\n private readonly backoffFactor: number;\n\n constructor(options: HttpClientOptions) {\n this.apiKey = options.apiKey;\n this.apiUrl = options.apiUrl.replace(/\\/$/, \"\");\n this.maxRetries = options.maxRetries ?? 3;\n this.backoffFactor = options.backoffFactor ?? 0.5;\n\n this.instance = axios.create({\n baseURL: this.apiUrl,\n timeout: options.timeoutMs ?? 90000,\n headers: {\n \"Content-Type\": \"application/json\",\n \"x-api-key\": this.apiKey,\n },\n });\n }\n\n /**\n * Get the configured API URL\n */\n getApiUrl(): string {\n return this.apiUrl;\n }\n\n /**\n * Get the configured API key\n */\n getApiKey(): string {\n return this.apiKey;\n }\n\n /**\n * Sleep for specified seconds\n */\n private sleep(seconds: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, seconds * 1000));\n }\n\n /**\n * Check if error is retryable\n */\n private isRetryableError(status?: number): boolean {\n return status === 502 || status === 503 || status === 429;\n }\n\n /**\n * Make HTTP request with retry logic\n */\n private async request<T = unknown>(\n config: AxiosRequestConfig\n ): Promise<AxiosResponse<T>> {\n let lastError: Error | undefined;\n\n for (let attempt = 0; attempt < this.maxRetries; attempt++) {\n try {\n // Add SDK origin to request body for tracking\n if (\n config.method &&\n [\"post\", \"put\", \"patch\"].includes(config.method.toLowerCase())\n ) {\n const data = (config.data ?? {}) as Record<string, unknown>;\n config.data = { ...data, origin: \"crawlgate-sdk\" };\n\n // If timeout is specified in body, extend request timeout\n if (typeof data.timeout === \"number\") {\n config.timeout = data.timeout + 5000;\n }\n }\n\n const response = await this.instance.request<T>(config);\n\n // Check for retryable status codes even on \"successful\" responses\n if (\n this.isRetryableError(response.status) &&\n attempt < this.maxRetries - 1\n ) {\n await this.sleep(this.backoffFactor * Math.pow(2, attempt));\n continue;\n }\n\n return response;\n } catch (err: unknown) {\n const axiosError = err as {\n response?: { status: number; data: unknown };\n message?: string;\n };\n\n lastError =\n err instanceof Error ? 
err : new Error(String(axiosError?.message));\n const status = axiosError?.response?.status;\n\n // Retry on retryable errors\n if (this.isRetryableError(status) && attempt < this.maxRetries - 1) {\n await this.sleep(this.backoffFactor * Math.pow(2, attempt));\n continue;\n }\n\n // Parse and throw appropriate error\n if (axiosError?.response) {\n parseApiError(\n axiosError.response.status,\n axiosError.response.data as {\n error?: string;\n message?: string;\n details?: unknown;\n }\n );\n }\n\n throw lastError;\n }\n }\n\n throw lastError ?? new CrawlGateError(\"Unexpected HTTP client error\");\n }\n\n /**\n * Make POST request\n */\n async post<T = unknown>(\n endpoint: string,\n body: Record<string, unknown>,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"post\",\n url: endpoint,\n data: body,\n headers,\n });\n }\n\n /**\n * Make GET request\n */\n async get<T = unknown>(\n endpoint: string,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"get\",\n url: endpoint,\n headers,\n });\n }\n\n /**\n * Make DELETE request\n */\n async delete<T = unknown>(\n endpoint: string,\n headers?: Record<string, string>\n ): Promise<AxiosResponse<T>> {\n return this.request<T>({\n method: \"delete\",\n url: endpoint,\n headers,\n });\n }\n}\n","/**\n * Base error class for CrawlGate SDK errors\n */\nexport class CrawlGateError extends Error {\n /**\n * HTTP status code (if applicable)\n */\n public readonly statusCode?: number;\n\n /**\n * Error code for programmatic handling\n */\n public readonly code?: string;\n\n /**\n * Additional error details\n */\n public readonly details?: unknown;\n\n constructor(\n message: string,\n statusCode?: number,\n code?: string,\n details?: unknown\n ) {\n super(message);\n this.name = \"CrawlGateError\";\n this.statusCode = statusCode;\n this.code = code;\n this.details = details;\n\n // Maintains proper stack trace for where error was thrown\n if (Error.captureStackTrace) {\n Error.captureStackTrace(this, CrawlGateError);\n }\n }\n}\n\n/**\n * Error thrown when authentication fails\n */\nexport class AuthenticationError extends CrawlGateError {\n constructor(message: string = \"Invalid API Key\") {\n super(message, 401, \"AUTHENTICATION_ERROR\");\n this.name = \"AuthenticationError\";\n }\n}\n\n/**\n * Error thrown when request validation fails\n */\nexport class ValidationError extends CrawlGateError {\n constructor(message: string, details?: unknown) {\n super(message, 400, \"VALIDATION_ERROR\", details);\n this.name = \"ValidationError\";\n }\n}\n\n/**\n * Error thrown when a crawl job times out\n */\nexport class JobTimeoutError extends CrawlGateError {\n /**\n * Job ID that timed out\n */\n public readonly jobId: string;\n\n /**\n * Timeout duration in seconds\n */\n public readonly timeoutSeconds: number;\n\n constructor(jobId: string, timeoutSeconds: number) {\n super(\n `Crawl job ${jobId} did not complete within ${timeoutSeconds} seconds`,\n undefined,\n \"JOB_TIMEOUT\"\n );\n this.name = \"JobTimeoutError\";\n this.jobId = jobId;\n this.timeoutSeconds = timeoutSeconds;\n }\n}\n\n/**\n * Error thrown when upstream service is unavailable\n */\nexport class ServiceUnavailableError extends CrawlGateError {\n constructor(message: string = \"Service temporarily unavailable\") {\n super(message, 503, \"SERVICE_UNAVAILABLE\");\n this.name = \"ServiceUnavailableError\";\n }\n}\n\n/**\n * Error thrown when rate limit is exceeded\n */\nexport class 
RateLimitError extends CrawlGateError {\n /**\n * Time to wait before retrying (in seconds)\n */\n public readonly retryAfter?: number;\n\n constructor(message: string = \"Rate limit exceeded\", retryAfter?: number) {\n super(message, 429, \"RATE_LIMIT_EXCEEDED\");\n this.name = \"RateLimitError\";\n this.retryAfter = retryAfter;\n }\n}\n\n/**\n * Error thrown when LLM extraction fails\n */\nexport class ExtractionError extends CrawlGateError {\n /**\n * Provider that failed\n */\n public readonly provider?: string;\n\n constructor(message: string, provider?: string) {\n super(message, undefined, \"EXTRACTION_ERROR\");\n this.name = \"ExtractionError\";\n this.provider = provider;\n }\n}\n\n/**\n * Parse error response from API and throw appropriate error\n */\nexport function parseApiError(\n status: number,\n data: { error?: string; message?: string; details?: unknown }\n): never {\n const message = data.error || data.message || \"Unknown error\";\n\n switch (status) {\n case 400:\n throw new ValidationError(message, data.details);\n case 401:\n throw new AuthenticationError(message);\n case 429:\n throw new RateLimitError(message);\n case 502:\n case 503:\n throw new ServiceUnavailableError(message);\n default:\n throw new CrawlGateError(message, status, undefined, data.details);\n }\n}\n","import type { ZodTypeAny } from \"zod\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport type { HttpClient } from \"../utils/httpClient\";\nimport type { ScrapeOptions, ScrapeResponse, Document } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Convert Zod schema to JSON Schema if needed\n */\nfunction convertSchema(\n schema: Record<string, unknown> | ZodTypeAny\n): Record<string, unknown> {\n // Check if it's a Zod schema by looking for _def property\n if (schema && typeof schema === \"object\" && \"_def\" in schema) {\n return zodToJsonSchema(schema as ZodTypeAny) as Record<string, unknown>;\n }\n return schema as Record<string, unknown>;\n}\n\n/**\n * Build request body for scrape endpoint\n */\nfunction buildScrapeBody(\n url: string,\n options?: ScrapeOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.formats) {\n body.formats = options.formats;\n }\n\n if (options?.onlyMainContent !== undefined) {\n body.onlyMainContent = options.onlyMainContent;\n }\n\n if (options?.excludeTags) {\n body.excludeTags = options.excludeTags;\n }\n\n if (options?.waitFor !== undefined) {\n body.waitFor = options.waitFor;\n }\n\n if (options?.timeout !== undefined) {\n body.timeout = options.timeout;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n // Handle LLM extraction\n if (options?.extract) {\n body.extract = {\n schema: convertSchema(options.extract.schema),\n systemPrompt: options.extract.systemPrompt,\n provider: options.extract.provider,\n enableFallback: options.extract.enableFallback,\n };\n\n // Remove undefined values\n Object.keys(body.extract as Record<string, unknown>).forEach((key) => {\n if ((body.extract as Record<string, unknown>)[key] === undefined) {\n delete (body.extract as Record<string, unknown>)[key];\n }\n });\n }\n\n return body;\n}\n\n/**\n * Scrape a single URL\n *\n * @param http - HTTP client instance\n * @param url - URL to scrape\n * @param options - Scrape options\n * @returns Scraped document\n */\nexport async function 
scrape(\n http: HttpClient,\n url: string,\n options?: ScrapeOptions\n): Promise<Document> {\n const body = buildScrapeBody(url, options);\n\n const response = await http.post<ScrapeResponse | Document>(\"/v1/scrape\", body);\n\n // Handle both wrapped response { success, data } and direct document response\n const data = response.data as Record<string, unknown>;\n\n // Check if it's an error response\n if (data.success === false) {\n throw new CrawlGateError(\n (data.error as string) || \"Scrape failed\",\n undefined,\n (data.code as string) || \"SCRAPE_ERROR\"\n );\n }\n\n // If response has success: true wrapper, extract data\n if (data.success === true && data.data) {\n return data.data as Document;\n }\n\n // Otherwise, response is the document itself (direct response)\n if (data.url || data.markdown || data.html) {\n return data as unknown as Document;\n }\n\n throw new CrawlGateError(\"No data returned from scrape\", undefined, \"NO_DATA\");\n}\n","import type { HttpClient } from \"../utils/httpClient\";\nimport type { CrawlOptions, CrawlResponse, CrawlJob, CrawlErrorsResponse } from \"../types\";\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\n\n/**\n * Build request body for crawl endpoint\n */\nfunction buildCrawlBody(\n url: string,\n options?: CrawlOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.limit !== undefined) {\n body.limit = options.limit;\n }\n\n if (options?.formats) {\n body.formats = options.formats;\n }\n\n if (options?.onlyMainContent !== undefined) {\n body.onlyMainContent = options.onlyMainContent;\n }\n\n if (options?.excludeTags) {\n body.excludeTags = options.excludeTags;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n return body;\n}\n\n/**\n * Start a crawl job (async)\n *\n * @param http - HTTP client instance\n * @param url - Root URL to crawl\n * @param options - Crawl options\n * @returns Crawl job ID and initial status\n */\nexport async function startCrawl(\n http: HttpClient,\n url: string,\n options?: CrawlOptions\n): Promise<CrawlResponse> {\n const body = buildCrawlBody(url, options);\n\n const response = await http.post<CrawlResponse>(\"/v1/crawl\", body);\n\n if (!response.data.success && !response.data.id) {\n throw new CrawlGateError(\n \"Failed to start crawl job\",\n undefined,\n \"CRAWL_START_ERROR\"\n );\n }\n\n return {\n success: true,\n id: response.data.id,\n jobId: response.data.id,\n status: response.data.status || \"scraping\",\n engine: response.data.engine,\n };\n}\n\n/**\n * Get crawl job status\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns Current job status and data\n */\nexport async function getCrawlStatus(\n http: HttpClient,\n jobId: string\n): Promise<CrawlJob> {\n const response = await http.get<CrawlJob>(`/v1/crawl/${jobId}`);\n\n return {\n id: response.data.id || jobId,\n status: response.data.status,\n total: response.data.total || 0,\n completed: response.data.completed || 0,\n data: response.data.data || [],\n engine: response.data.engine,\n error: response.data.error,\n };\n}\n\n/**\n * Cancel a crawl job\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns True if cancelled successfully\n */\nexport async function cancelCrawl(\n http: HttpClient,\n jobId: string\n): Promise<boolean> {\n const response = await 
http.delete<{ success?: boolean; message?: string }>(\n `/v1/crawl/${jobId}`\n );\n\n return response.data.success !== false;\n}\n\n/**\n * Sleep for specified milliseconds\n */\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\n/**\n * Crawl a website and wait for completion (waiter pattern)\n *\n * @param http - HTTP client instance\n * @param url - Root URL to crawl\n * @param options - Crawl options including pollInterval and timeout\n * @returns Final crawl job with all data\n */\nexport async function crawl(\n http: HttpClient,\n url: string,\n options?: CrawlOptions\n): Promise<CrawlJob> {\n const pollInterval = options?.pollInterval ?? 2000; // 2 seconds default\n const timeout = options?.timeout ?? 300; // 5 minutes default (in seconds)\n\n // Start the crawl job\n const { id: jobId } = await startCrawl(http, url, options);\n\n const startTime = Date.now();\n const timeoutMs = timeout * 1000;\n\n // Poll until completion or timeout\n while (true) {\n const status = await getCrawlStatus(http, jobId);\n\n // Check for terminal states\n if (status.status === \"completed\") {\n return status;\n }\n\n if (status.status === \"failed\") {\n throw new CrawlGateError(\n status.error || \"Crawl job failed\",\n undefined,\n \"CRAWL_FAILED\"\n );\n }\n\n if (status.status === \"cancelled\") {\n throw new CrawlGateError(\n \"Crawl job was cancelled\",\n undefined,\n \"CRAWL_CANCELLED\"\n );\n }\n\n // Check for timeout\n if (Date.now() - startTime > timeoutMs) {\n throw new JobTimeoutError(jobId, timeout);\n }\n\n // Wait before next poll\n await sleep(pollInterval);\n }\n}\n\n/**\n * Get crawl job errors and robots.txt blocks\n *\n * @param http - HTTP client instance\n * @param jobId - Crawl job ID\n * @returns Errors and robots.txt blocked URLs\n */\nexport async function getCrawlErrors(\n http: HttpClient,\n jobId: string\n): Promise<CrawlErrorsResponse> {\n const response = await http.get<{\n success?: boolean;\n data?: { errors: Array<Record<string, string>>; robotsBlocked: string[] };\n errors?: Array<Record<string, string>>;\n robotsBlocked?: string[];\n }>(`/v1/crawl/${jobId}/errors`);\n\n const payload = response.data.data ?? 
response.data;\n return {\n errors: (payload.errors || []).map((e) => ({\n id: e.id || \"\",\n timestamp: e.timestamp,\n url: e.url || \"\",\n code: e.code,\n error: e.error || e.message || \"Unknown error\",\n })),\n robotsBlocked: payload.robotsBlocked || [],\n };\n}\n","import type { HttpClient } from \"../utils/httpClient\";\nimport type { MapOptions, MapResponse } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Build request body for map endpoint\n */\nfunction buildMapBody(\n url: string,\n options?: MapOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { url };\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.proxy) {\n body.proxy = options.proxy;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n return body;\n}\n\n/**\n * Map a website to discover all URLs\n *\n * @param http - HTTP client instance\n * @param url - Root URL to map\n * @param options - Map options\n * @returns List of discovered URLs\n */\nexport async function map(\n http: HttpClient,\n url: string,\n options?: MapOptions\n): Promise<MapResponse> {\n const body = buildMapBody(url, options);\n\n const response = await http.post<MapResponse>(\"/v1/map\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Map failed\",\n undefined,\n \"MAP_ERROR\"\n );\n }\n\n return {\n success: true,\n links: response.data.links || [],\n count: response.data.count || response.data.links?.length || 0,\n engine: response.data.engine,\n };\n}\n","import type { ZodTypeAny } from \"zod\";\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\nimport type { HttpClient } from \"../utils/httpClient\";\nimport type { SearchOptions, SearchResponse } from \"../types\";\nimport { CrawlGateError } from \"../errors\";\n\n/**\n * Convert Zod schema to JSON Schema if needed\n */\nfunction convertSchema(\n schema: Record<string, unknown> | ZodTypeAny\n): Record<string, unknown> {\n if (schema && typeof schema === \"object\" && \"_def\" in schema) {\n return zodToJsonSchema(schema as ZodTypeAny) as Record<string, unknown>;\n }\n return schema as Record<string, unknown>;\n}\n\n/**\n * Build request body for search endpoint\n */\nfunction buildSearchBody(\n query: string,\n options?: SearchOptions\n): Record<string, unknown> {\n const body: Record<string, unknown> = { query };\n\n if (options?.limit !== undefined) {\n body.limit = options.limit;\n }\n\n if (options?.lang) {\n body.lang = options.lang;\n }\n\n if (options?.country) {\n body.country = options.country;\n }\n\n if (options?.engines) {\n body.engines = options.engines;\n }\n\n if (options?.scrapeOptions) {\n body.scrapeOptions = options.scrapeOptions;\n }\n\n if (options?.engine) {\n body.engine = options.engine;\n }\n\n if (options?.projectId) {\n body.project_id = options.projectId;\n }\n\n // Handle LLM extraction\n if (options?.extract) {\n body.extract = {\n schema: convertSchema(options.extract.schema),\n systemPrompt: options.extract.systemPrompt,\n provider: options.extract.provider,\n enableFallback: options.extract.enableFallback,\n };\n\n // Remove undefined values\n Object.keys(body.extract as Record<string, unknown>).forEach((key) => {\n if ((body.extract as Record<string, unknown>)[key] === undefined) {\n delete (body.extract as Record<string, unknown>)[key];\n }\n });\n }\n\n return body;\n}\n\n/**\n * Search the web and optionally scrape results\n *\n * @param http - HTTP client instance\n * @param query - 
Search query\n * @param options - Search options\n * @returns Search results with optional scraped content\n */\nexport async function search(\n http: HttpClient,\n query: string,\n options?: SearchOptions\n): Promise<SearchResponse> {\n const body = buildSearchBody(query, options);\n\n const response = await http.post<SearchResponse>(\"/v1/search\", body);\n\n if (!response.data.success) {\n throw new CrawlGateError(\n response.data.error || \"Search failed\",\n undefined,\n \"SEARCH_ERROR\"\n );\n }\n\n return {\n success: true,\n data: response.data.data || [],\n query: response.data.query || query,\n totalResults: response.data.totalResults,\n searchTime: response.data.searchTime,\n extract: response.data.extract,\n };\n}\n","import type { HttpClient } from \"../utils/httpClient\";\r\nimport type {\r\n BatchScrapeOptions,\r\n BatchScrapeResponse,\r\n BatchScrapeJob,\r\n CrawlErrorsResponse,\r\n Document,\r\n ScrapeOptions,\r\n} from \"../types\";\r\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\r\n\r\n/**\r\n * Build request body for batch scrape endpoint\r\n */\r\nfunction buildBatchBody(\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Record<string, unknown> {\r\n const body: Record<string, unknown> = { urls };\r\n\r\n if (options?.options) {\r\n // Spread scrape options into body\r\n const scrapeOpts = options.options;\r\n if (scrapeOpts.engine) body.engine = scrapeOpts.engine;\r\n if (scrapeOpts.formats) body.formats = scrapeOpts.formats;\r\n if (scrapeOpts.onlyMainContent !== undefined) body.onlyMainContent = scrapeOpts.onlyMainContent;\r\n if (scrapeOpts.excludeTags) body.excludeTags = scrapeOpts.excludeTags;\r\n if (scrapeOpts.waitFor !== undefined) body.waitFor = scrapeOpts.waitFor;\r\n if (scrapeOpts.timeout !== undefined) body.timeout = scrapeOpts.timeout;\r\n if (scrapeOpts.proxy) body.proxy = scrapeOpts.proxy;\r\n }\r\n\r\n if (options?.webhook != null) {\r\n body.webhook = options.webhook;\r\n }\r\n\r\n if (options?.appendToId != null) {\r\n body.appendToId = options.appendToId;\r\n }\r\n\r\n if (options?.ignoreInvalidURLs != null) {\r\n body.ignoreInvalidURLs = options.ignoreInvalidURLs;\r\n }\r\n\r\n if (options?.maxConcurrency != null) {\r\n body.maxConcurrency = options.maxConcurrency;\r\n }\r\n\r\n if (options?.projectId) {\r\n body.project_id = options.projectId;\r\n }\r\n\r\n return body;\r\n}\r\n\r\n/**\r\n * Start a batch scrape job (async)\r\n *\r\n * @param http - HTTP client instance\r\n * @param urls - Array of URLs to scrape\r\n * @param options - Batch scrape options\r\n * @returns Batch job ID and initial status\r\n */\r\nexport async function startBatchScrape(\r\n http: HttpClient,\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Promise<BatchScrapeResponse> {\r\n if (!Array.isArray(urls) || urls.length === 0) {\r\n throw new CrawlGateError(\"URLs array cannot be empty\", 400, \"VALIDATION_ERROR\");\r\n }\r\n\r\n const body = buildBatchBody(urls, options);\r\n\r\n const headers: Record<string, string> = {};\r\n if (options?.idempotencyKey) {\r\n headers[\"Idempotency-Key\"] = options.idempotencyKey;\r\n }\r\n\r\n const response = await http.post<BatchScrapeResponse>(\r\n \"/v1/batch/scrape\",\r\n body,\r\n Object.keys(headers).length > 0 ? 
headers : undefined\r\n );\r\n\r\n if (!response.data.success && !response.data.id) {\r\n throw new CrawlGateError(\r\n response.data.error || \"Failed to start batch scrape job\",\r\n undefined,\r\n \"BATCH_START_ERROR\"\r\n );\r\n }\r\n\r\n return {\r\n success: true,\r\n id: response.data.id,\r\n url: response.data.url,\r\n invalidURLs: response.data.invalidURLs,\r\n };\r\n}\r\n\r\n/**\r\n * Get batch scrape job status\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns Current job status and data\r\n */\r\nexport async function getBatchScrapeStatus(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<BatchScrapeJob> {\r\n const response = await http.get<BatchScrapeJob & { success?: boolean }>(\r\n `/v1/batch/scrape/${jobId}`\r\n );\r\n\r\n return {\r\n id: response.data.id || jobId,\r\n status: response.data.status,\r\n total: response.data.total || 0,\r\n completed: response.data.completed || 0,\r\n creditsUsed: response.data.creditsUsed,\r\n expiresAt: response.data.expiresAt,\r\n next: response.data.next ?? null,\r\n data: response.data.data || [],\r\n error: response.data.error,\r\n };\r\n}\r\n\r\n/**\r\n * Cancel a batch scrape job\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns True if cancelled successfully\r\n */\r\nexport async function cancelBatchScrape(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<boolean> {\r\n const response = await http.delete<{ success?: boolean; status?: string }>(\r\n `/v1/batch/scrape/${jobId}`\r\n );\r\n\r\n return response.data.status === \"cancelled\" || response.data.success !== false;\r\n}\r\n\r\n/**\r\n * Get batch scrape errors\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @returns Errors and robots.txt blocks\r\n */\r\nexport async function getBatchScrapeErrors(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<CrawlErrorsResponse> {\r\n const response = await http.get<{\r\n success?: boolean;\r\n data?: { errors: Array<Record<string, string>>; robotsBlocked: string[] };\r\n errors?: Array<Record<string, string>>;\r\n robotsBlocked?: string[];\r\n }>(`/v1/batch/scrape/${jobId}/errors`);\r\n\r\n const payload = response.data.data ?? response.data;\r\n return {\r\n errors: (payload.errors || []).map((e) => ({\r\n id: e.id || \"\",\r\n timestamp: e.timestamp,\r\n url: e.url || \"\",\r\n code: e.code,\r\n error: e.error || e.message || \"Unknown error\",\r\n })),\r\n robotsBlocked: payload.robotsBlocked || [],\r\n };\r\n}\r\n\r\n/**\r\n * Sleep for specified milliseconds\r\n */\r\nfunction sleep(ms: number): Promise<void> {\r\n return new Promise((resolve) => setTimeout(resolve, ms));\r\n}\r\n\r\n/**\r\n * Wait for batch scrape job completion\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Batch job ID\r\n * @param pollInterval - Poll interval in milliseconds\r\n * @param timeout - Maximum wait time in seconds\r\n * @returns Final job status with all data\r\n */\r\nasync function waitForBatchCompletion(\r\n http: HttpClient,\r\n jobId: string,\r\n pollInterval: number = 2000,\r\n timeout?: number\r\n): Promise<BatchScrapeJob> {\r\n const startTime = Date.now();\r\n const timeoutMs = timeout ? 
timeout * 1000 : undefined;\r\n\r\n while (true) {\r\n const status = await getBatchScrapeStatus(http, jobId);\r\n\r\n // Check for terminal states\r\n if (status.status === \"completed\") {\r\n return status;\r\n }\r\n\r\n if (status.status === \"failed\") {\r\n throw new CrawlGateError(\r\n status.error || \"Batch scrape job failed\",\r\n undefined,\r\n \"BATCH_FAILED\"\r\n );\r\n }\r\n\r\n if (status.status === \"cancelled\") {\r\n throw new CrawlGateError(\r\n \"Batch scrape job was cancelled\",\r\n undefined,\r\n \"BATCH_CANCELLED\"\r\n );\r\n }\r\n\r\n // Check for timeout\r\n if (timeoutMs && Date.now() - startTime > timeoutMs) {\r\n throw new JobTimeoutError(jobId, timeout!);\r\n }\r\n\r\n // Wait before next poll\r\n await sleep(Math.max(1000, pollInterval));\r\n }\r\n}\r\n\r\n/**\r\n * Batch scrape multiple URLs and wait for completion (waiter pattern)\r\n *\r\n * @param http - HTTP client instance\r\n * @param urls - Array of URLs to scrape\r\n * @param options - Batch scrape options including pollInterval and timeout\r\n * @returns Final job with all scraped data\r\n */\r\nexport async function batchScrape(\r\n http: HttpClient,\r\n urls: string[],\r\n options?: BatchScrapeOptions\r\n): Promise<BatchScrapeJob> {\r\n const pollInterval = options?.pollInterval ?? 2000;\r\n const timeout = options?.timeout;\r\n\r\n // Start the batch job\r\n const { id: jobId } = await startBatchScrape(http, urls, options);\r\n\r\n // Wait for completion\r\n return waitForBatchCompletion(http, jobId, pollInterval, timeout);\r\n}\r\n\r\n/**\r\n * Split URLs into chunks for large batch operations\r\n *\r\n * @param urls - Array of URLs\r\n * @param chunkSize - Maximum URLs per chunk (default: 100)\r\n * @returns Array of URL chunks\r\n */\r\nexport function chunkUrls(urls: string[], chunkSize: number = 100): string[][] {\r\n const chunks: string[][] = [];\r\n for (let i = 0; i < urls.length; i += chunkSize) {\r\n chunks.push(urls.slice(i, i + chunkSize));\r\n }\r\n return chunks;\r\n}\r\n","import type { ZodTypeAny } from \"zod\";\r\nimport { zodToJsonSchema } from \"zod-to-json-schema\";\r\nimport type { HttpClient } from \"../utils/httpClient\";\r\nimport type { ExtractRequestOptions, ExtractResponse, ScrapeOptions } from \"../types\";\r\nimport { CrawlGateError, JobTimeoutError } from \"../errors\";\r\n\r\n/**\r\n * Check if value is a Zod schema\r\n */\r\nfunction isZodSchema(value: unknown): value is ZodTypeAny {\r\n return (\r\n value !== null &&\r\n typeof value === \"object\" &&\r\n \"_def\" in value &&\r\n (typeof (value as any).safeParse === \"function\" || typeof (value as any).parse === \"function\")\r\n );\r\n}\r\n\r\n/**\r\n * Convert Zod schema to JSON Schema if needed\r\n */\r\nfunction convertSchema(\r\n schema: Record<string, unknown> | ZodTypeAny\r\n): Record<string, unknown> {\r\n if (isZodSchema(schema)) {\r\n return zodToJsonSchema(schema) as Record<string, unknown>;\r\n }\r\n return schema as Record<string, unknown>;\r\n}\r\n\r\n/**\r\n * Build request body for extract endpoint\r\n */\r\nfunction buildExtractBody(\r\n options: ExtractRequestOptions\r\n): Record<string, unknown> {\r\n const body: Record<string, unknown> = {};\r\n\r\n if (options.urls) {\r\n body.urls = options.urls;\r\n }\r\n\r\n if (options.prompt != null) {\r\n body.prompt = options.prompt;\r\n }\r\n\r\n if (options.schema != null) {\r\n body.schema = convertSchema(options.schema);\r\n }\r\n\r\n if (options.systemPrompt != null) {\r\n body.systemPrompt = options.systemPrompt;\r\n }\r\n\r\n if 
(options.allowExternalLinks != null) {\r\n body.allowExternalLinks = options.allowExternalLinks;\r\n }\r\n\r\n if (options.enableWebSearch != null) {\r\n body.enableWebSearch = options.enableWebSearch;\r\n }\r\n\r\n if (options.showSources != null) {\r\n body.showSources = options.showSources;\r\n }\r\n\r\n if (options.ignoreInvalidURLs != null) {\r\n body.ignoreInvalidURLs = options.ignoreInvalidURLs;\r\n }\r\n\r\n if (options.provider) {\r\n body.provider = options.provider;\r\n }\r\n\r\n if (options.projectId) {\r\n body.project_id = options.projectId;\r\n }\r\n\r\n if (options.scrapeOptions) {\r\n body.scrapeOptions = options.scrapeOptions;\r\n }\r\n\r\n return body;\r\n}\r\n\r\n/**\r\n * Start an extract job (async)\r\n *\r\n * @param http - HTTP client instance\r\n * @param options - Extract request options\r\n * @returns Extract job ID or immediate result\r\n */\r\nexport async function startExtract(\r\n http: HttpClient,\r\n options: ExtractRequestOptions\r\n): Promise<ExtractResponse> {\r\n const body = buildExtractBody(options);\r\n\r\n const response = await http.post<ExtractResponse>(\"/v1/extract\", body);\r\n\r\n if (response.data.success === false && response.data.error) {\r\n throw new CrawlGateError(\r\n response.data.error,\r\n undefined,\r\n \"EXTRACT_ERROR\"\r\n );\r\n }\r\n\r\n return response.data;\r\n}\r\n\r\n/**\r\n * Get extract job status\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Extract job ID\r\n * @returns Current job status and data\r\n */\r\nexport async function getExtractStatus(\r\n http: HttpClient,\r\n jobId: string\r\n): Promise<ExtractResponse> {\r\n const response = await http.get<ExtractResponse>(`/v1/extract/${jobId}`);\r\n\r\n if (response.data.success === false && response.data.error) {\r\n throw new CrawlGateError(\r\n response.data.error,\r\n undefined,\r\n \"EXTRACT_STATUS_ERROR\"\r\n );\r\n }\r\n\r\n return response.data;\r\n}\r\n\r\n/**\r\n * Sleep for specified milliseconds\r\n */\r\nfunction sleep(ms: number): Promise<void> {\r\n return new Promise((resolve) => setTimeout(resolve, ms));\r\n}\r\n\r\n/**\r\n * Wait for extract job completion\r\n *\r\n * @param http - HTTP client instance\r\n * @param jobId - Extract job ID\r\n * @param pollInterval - Poll interval in milliseconds\r\n * @param timeout - Maximum wait time in seconds\r\n * @returns Final extract result\r\n */\r\nasync function waitForExtractCompletion(\r\n http: HttpClient,\r\n jobId: string,\r\n pollInterval: number = 2000,\r\n timeout?: number\r\n): Promise<ExtractResponse> {\r\n const startTime = Date.now();\r\n const timeoutMs = timeout ? 
timeout * 1000 : undefined;\r\n\r\n while (true) {\r\n const status = await getExtractStatus(http, jobId);\r\n\r\n // Check for terminal states\r\n if (status.status === \"completed\") {\r\n return status;\r\n }\r\n\r\n if (status.status === \"failed\") {\r\n throw new CrawlGateError(\r\n status.error || \"Extract job failed\",\r\n undefined,\r\n \"EXTRACT_FAILED\"\r\n );\r\n }\r\n\r\n if (status.status === \"cancelled\") {\r\n throw new CrawlGateError(\r\n \"Extract job was cancelled\",\r\n undefined,\r\n \"EXTRACT_CANCELLED\"\r\n );\r\n }\r\n\r\n // Check for timeout\r\n if (timeoutMs && Date.now() - startTime > timeoutMs) {\r\n throw new JobTimeoutError(jobId, timeout!);\r\n }\r\n\r\n // Wait before next poll\r\n await sleep(Math.max(1000, pollInterval));\r\n }\r\n}\r\n\r\n/**\r\n * Extract structured data from URLs using LLM (waiter pattern)\r\n *\r\n * This method starts an extract job and polls until completion.\r\n * For synchronous extracts (small payloads), it may return immediately.\r\n *\r\n * @param http - HTTP client instance\r\n * @param options - Extract request options including pollInterval and timeout\r\n * @returns Final extract result with structured data\r\n *\r\n * @example\r\n * ```typescript\r\n * import { z } from 'zod';\r\n *\r\n * const result = await extract(http, {\r\n * urls: ['https://example.com/product'],\r\n * schema: z.object({\r\n * name: z.string(),\r\n * price: z.number(),\r\n * inStock: z.boolean()\r\n * }),\r\n * systemPrompt: 'Extract product information',\r\n * provider: 'openai'\r\n * });\r\n *\r\n * console.log(result.data);\r\n * ```\r\n */\r\nexport async function extract(\r\n http: HttpClient,\r\n options: ExtractRequestOptions\r\n): Promise<ExtractResponse> {\r\n const pollInterval = options.pollInterval ?? 2000;\r\n const timeout = options.timeout;\r\n\r\n // Start the extract job\r\n const started = await startExtract(http, options);\r\n\r\n // If no job ID, it was a synchronous response\r\n if (!started.id) {\r\n return started;\r\n }\r\n\r\n // If already completed, return immediately\r\n if (started.status === \"completed\") {\r\n return started;\r\n }\r\n\r\n // Wait for completion\r\n return waitForExtractCompletion(http, started.id, pollInterval, timeout);\r\n}\r\n","import type { HttpClient } from \"../utils/httpClient\";\r\nimport type { ConcurrencyInfo, CreditUsage, TokenUsage, QueueStatus } from \"../types\";\r\n\r\n/**\r\n * Get current concurrency usage\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Current and max concurrency\r\n */\r\nexport async function getConcurrency(http: HttpClient): Promise<ConcurrencyInfo> {\r\n const response = await http.get<ConcurrencyInfo>(\"/v1/concurrency\");\r\n\r\n return {\r\n concurrency: response.data.concurrency ?? 0,\r\n maxConcurrency: response.data.maxConcurrency ?? 0,\r\n };\r\n}\r\n\r\n/**\r\n * Get current credit usage\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Credit usage information\r\n */\r\nexport async function getCreditUsage(http: HttpClient): Promise<CreditUsage> {\r\n const response = await http.get<CreditUsage>(\"/v1/credits\");\r\n\r\n return {\r\n remainingCredits: response.data.remainingCredits ?? 
0,\r\n planCredits: response.data.planCredits,\r\n billingPeriodStart: response.data.billingPeriodStart,\r\n billingPeriodEnd: response.data.billingPeriodEnd,\r\n };\r\n}\r\n\r\n/**\r\n * Get current token usage (for LLM extraction)\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Token usage information\r\n */\r\nexport async function getTokenUsage(http: HttpClient): Promise<TokenUsage> {\r\n const response = await http.get<TokenUsage>(\"/v1/tokens\");\r\n\r\n return {\r\n remainingTokens: response.data.remainingTokens ?? 0,\r\n planTokens: response.data.planTokens,\r\n billingPeriodStart: response.data.billingPeriodStart,\r\n billingPeriodEnd: response.data.billingPeriodEnd,\r\n };\r\n}\r\n\r\n/**\r\n * Get queue status information\r\n *\r\n * @param http - HTTP client instance\r\n * @returns Queue status metrics\r\n */\r\nexport async function getQueueStatus(http: HttpClient): Promise<QueueStatus> {\r\n const response = await http.get<QueueStatus>(\"/v1/queue\");\r\n\r\n return {\r\n success: response.data.success ?? true,\r\n jobsInQueue: response.data.jobsInQueue ?? 0,\r\n activeJobsInQueue: response.data.activeJobsInQueue ?? 0,\r\n waitingJobsInQueue: response.data.waitingJobsInQueue ?? 0,\r\n maxConcurrency: response.data.maxConcurrency ?? 0,\r\n mostRecentSuccess: response.data.mostRecentSuccess,\r\n };\r\n}\r\n","import { HttpClient } from \"./utils/httpClient\";\nimport { scrape } from \"./methods/scrape\";\nimport { startCrawl, getCrawlStatus, cancelCrawl, crawl, getCrawlErrors } from \"./methods/crawl\";\nimport { map } from \"./methods/map\";\nimport { search } from \"./methods/search\";\nimport {\n startBatchScrape,\n getBatchScrapeStatus,\n cancelBatchScrape,\n batchScrape,\n getBatchScrapeErrors,\n} from \"./methods/batch\";\nimport { startExtract, getExtractStatus, extract } from \"./methods/extract\";\nimport { getConcurrency, getCreditUsage, getTokenUsage, getQueueStatus } from \"./methods/usage\";\nimport type {\n CrawlGateClientOptions,\n ScrapeOptions,\n CrawlOptions,\n MapOptions,\n SearchOptions,\n Document,\n CrawlResponse,\n CrawlJob,\n CrawlErrorsResponse,\n MapResponse,\n SearchResponse,\n BatchScrapeOptions,\n BatchScrapeResponse,\n BatchScrapeJob,\n ExtractRequestOptions,\n ExtractResponse,\n ConcurrencyInfo,\n CreditUsage,\n TokenUsage,\n QueueStatus,\n} from \"./types\";\nimport { CrawlGateError } from \"./errors\";\n\n/**\n * CrawlGate SDK Client\n *\n * @example\n * ```typescript\n * import { CrawlGateClient } from '@crawlgate/sdk';\n *\n * const client = new CrawlGateClient({\n * apiKey: 'sk_live_...',\n * apiUrl: 'https://api.crawlgate.io'\n * });\n *\n * // Scrape a single URL\n * const doc = await client.scrape('https://example.com', {\n * engine: 'smart',\n * formats: ['markdown', 'html']\n * });\n *\n * // Batch scrape multiple URLs\n * const job = await client.batchScrape(['https://a.com', 'https://b.com'], {\n * options: { formats: ['markdown'] }\n * });\n *\n * // Crawl a website\n * const crawlJob = await client.crawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic'\n * });\n *\n * // Extract structured data with LLM\n * const extracted = await client.extract({\n * urls: ['https://example.com/product'],\n * schema: { name: 'string', price: 'number' },\n * provider: 'openai'\n * });\n *\n * // Search the web\n * const results = await client.search('best restaurants', {\n * limit: 5,\n * scrapeOptions: { formats: ['markdown'] }\n * });\n * ```\n */\nexport class CrawlGateClient {\n private readonly http: HttpClient;\n\n 
/**\n * Create a new CrawlGate client\n *\n * @param options - Client configuration options\n * @throws {CrawlGateError} If API key is not provided\n */\n constructor(options: CrawlGateClientOptions = {}) {\n const apiKey = options.apiKey ?? process.env.CRAWLGATE_API_KEY ?? \"\";\n const apiUrl = (\n options.apiUrl ??\n process.env.CRAWLGATE_API_URL ??\n \"https://api.crawlgate.io\"\n ).replace(/\\/$/, \"\");\n\n if (!apiKey) {\n throw new CrawlGateError(\n \"API key is required. Set CRAWLGATE_API_KEY env variable or pass apiKey option.\",\n undefined,\n \"MISSING_API_KEY\"\n );\n }\n\n this.http = new HttpClient({\n apiKey,\n apiUrl,\n timeoutMs: options.timeoutMs,\n maxRetries: options.maxRetries,\n backoffFactor: options.backoffFactor,\n });\n }\n\n // ==========================================================================\n // Scrape Methods\n // ==========================================================================\n\n /**\n * Scrape a single URL\n *\n * @param url - URL to scrape\n * @param options - Scrape options\n * @returns Scraped document with requested formats\n *\n * @example\n * ```typescript\n * const doc = await client.scrape('https://example.com', {\n * engine: 'smart',\n * formats: ['markdown', 'html'],\n * onlyMainContent: true\n * });\n * console.log(doc.markdown);\n * ```\n *\n * @example With LLM extraction\n * ```typescript\n * import { z } from 'zod';\n *\n * const schema = z.object({\n * title: z.string(),\n * price: z.number(),\n * inStock: z.boolean()\n * });\n *\n * const doc = await client.scrape('https://example.com/product', {\n * engine: 'smart',\n * extract: {\n * schema,\n * systemPrompt: 'Extract product details',\n * provider: 'openai'\n * }\n * });\n * console.log(doc.extract?.data);\n * ```\n */\n async scrape(url: string, options?: ScrapeOptions): Promise<Document> {\n return scrape(this.http, url, options);\n }\n\n // ==========================================================================\n // Batch Scrape Methods\n // ==========================================================================\n\n /**\n * Start a batch scrape job (async)\n *\n * @param urls - Array of URLs to scrape\n * @param options - Batch scrape options\n * @returns Batch job ID and initial status\n *\n * @example\n * ```typescript\n * const { id } = await client.startBatchScrape(\n * ['https://a.com', 'https://b.com', 'https://c.com'],\n * { options: { formats: ['markdown'] } }\n * );\n *\n * // Poll manually\n * let status = await client.getBatchScrapeStatus(id);\n * while (status.status === 'scraping') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getBatchScrapeStatus(id);\n * }\n * ```\n */\n async startBatchScrape(\n urls: string[],\n options?: BatchScrapeOptions\n ): Promise<BatchScrapeResponse> {\n return startBatchScrape(this.http, urls, options);\n }\n\n /**\n * Get batch scrape job status and data\n *\n * @param jobId - Batch job ID\n * @returns Current job status and scraped data\n */\n async getBatchScrapeStatus(jobId: string): Promise<BatchScrapeJob> {\n return getBatchScrapeStatus(this.http, jobId);\n }\n\n /**\n * Cancel a batch scrape job\n *\n * @param jobId - Batch job ID\n * @returns True if cancelled successfully\n */\n async cancelBatchScrape(jobId: string): Promise<boolean> {\n return cancelBatchScrape(this.http, jobId);\n }\n\n /**\n * Get batch scrape job errors\n *\n * @param jobId - Batch job ID\n * @returns Errors and robots.txt blocked URLs\n */\n async getBatchScrapeErrors(jobId: string): 
Promise<CrawlErrorsResponse> {\n return getBatchScrapeErrors(this.http, jobId);\n }\n\n /**\n * Batch scrape multiple URLs and wait for completion\n *\n * @param urls - Array of URLs to scrape\n * @param options - Batch options including pollInterval and timeout\n * @returns Final job with all scraped data\n *\n * @example\n * ```typescript\n * const job = await client.batchScrape(\n * ['https://a.com', 'https://b.com', 'https://c.com'],\n * {\n * options: { formats: ['markdown'], engine: 'smart' },\n * pollInterval: 2000,\n * timeout: 300\n * }\n * );\n *\n * console.log(`Scraped ${job.completed} URLs`);\n * job.data.forEach(doc => console.log(doc.url, doc.markdown?.length));\n * ```\n */\n async batchScrape(\n urls: string[],\n options?: BatchScrapeOptions\n ): Promise<BatchScrapeJob> {\n return batchScrape(this.http, urls, options);\n }\n\n // ==========================================================================\n // Crawl Methods\n // ==========================================================================\n\n /**\n * Start a crawl job (async)\n *\n * Use this method when you want to start a crawl and manage polling yourself.\n * For automatic polling, use the `crawl()` method instead.\n *\n * @param url - Root URL to crawl\n * @param options - Crawl options\n * @returns Crawl job ID and initial status\n *\n * @example\n * ```typescript\n * const { id } = await client.startCrawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic'\n * });\n *\n * // Poll for status manually\n * let status = await client.getCrawlStatus(id);\n * while (status.status === 'scraping') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getCrawlStatus(id);\n * }\n * ```\n */\n async startCrawl(url: string, options?: CrawlOptions): Promise<CrawlResponse> {\n return startCrawl(this.http, url, options);\n }\n\n /**\n * Get crawl job status and data\n *\n * @param jobId - Crawl job ID\n * @returns Current job status and scraped data\n */\n async getCrawlStatus(jobId: string): Promise<CrawlJob> {\n return getCrawlStatus(this.http, jobId);\n }\n\n /**\n * Cancel a crawl job\n *\n * @param jobId - Crawl job ID\n * @returns True if cancelled successfully\n */\n async cancelCrawl(jobId: string): Promise<boolean> {\n return cancelCrawl(this.http, jobId);\n }\n\n /**\n * Get crawl job errors and robots.txt blocks\n *\n * @param jobId - Crawl job ID\n * @returns Errors and robots.txt blocked URLs\n */\n async getCrawlErrors(jobId: string): Promise<CrawlErrorsResponse> {\n return getCrawlErrors(this.http, jobId);\n }\n\n /**\n * Crawl a website and wait for completion\n *\n * This method starts a crawl job and automatically polls until completion.\n *\n * @param url - Root URL to crawl\n * @param options - Crawl options including pollInterval and timeout\n * @returns Final crawl job with all scraped data\n *\n * @example\n * ```typescript\n * const job = await client.crawl('https://example.com', {\n * limit: 10,\n * engine: 'dynamic',\n * formats: ['markdown'],\n * pollInterval: 2000, // Poll every 2 seconds\n * timeout: 300 // 5 minute timeout\n * });\n *\n * console.log(`Crawled ${job.completed} pages`);\n * job.data.forEach(doc => console.log(doc.url));\n * ```\n */\n async crawl(url: string, options?: CrawlOptions): Promise<CrawlJob> {\n return crawl(this.http, url, options);\n }\n\n // ==========================================================================\n // Extract Methods (Standalone LLM Extraction)\n // 
==========================================================================\n\n /**\n * Start an extract job (async)\n *\n * @param options - Extract request options\n * @returns Extract job ID or immediate result\n *\n * @example\n * ```typescript\n * const { id } = await client.startExtract({\n * urls: ['https://example.com/product'],\n * schema: { name: 'string', price: 'number' },\n * provider: 'openai'\n * });\n *\n * // Poll manually\n * let status = await client.getExtractStatus(id);\n * while (status.status === 'processing') {\n * await new Promise(r => setTimeout(r, 2000));\n * status = await client.getExtractStatus(id);\n * }\n * console.log(status.data);\n * ```\n */\n async startExtract(options: ExtractRequestOptions): Promise<ExtractResponse> {\n return startExtract(this.http, options);\n }\n\n /**\n * Get extract job status and data\n *\n * @param jobId - Extract job ID\n * @returns Current job status and extracted data\n */\n async getExtractStatus(jobId: string): Promise<ExtractResponse> {\n return getExtractStatus(this.http, jobId);\n }\n\n /**\n * Extract structured data from URLs using LLM and wait for completion\n *\n * @param options - Extract options including schema, prompt, and timeout\n * @returns Final extract result with structured data\n *\n * @example With Zod schema\n * ```typescript\n * import { z } from 'zod';\n *\n * const result = await client.extract({\n * urls: ['https://example.com/product'],\n * schema: z.object({\n * name: z.string(),\n * price: z.number(),\n * inStock: z.boolean(),\n * features: z.array(z.string())\n * }),\n * systemPrompt: 'Extract product information from the page',\n * provider: 'openai',\n * timeout: 60\n * });\n *\n * console.log(result.data);\n * ```\n *\n * @example With natural language prompt\n * ```typescript\n * const result = await client.extract({\n * urls: ['https://example.com/about'],\n * prompt: 'Extract the company name, founding year, and list of team members',\n * enableWebSearch: true\n * });\n *\n * console.log(result.data);\n * ```\n */\n async extract(options: ExtractRequestOptions): Promise<ExtractResponse> {\n return extract(this.http, options);\n }\n\n // ==========================================================================\n // Map Methods\n // ==========================================================================\n\n /**\n * Map a website to discover all URLs\n *\n * @param url - Root URL to map\n * @param options - Map options\n * @returns List of discovered URLs\n *\n * @example\n * ```typescript\n * const result = await client.map('https://example.com', {\n * engine: 'dynamic'\n * });\n *\n * console.log(`Found ${result.count} URLs:`);\n * result.links.forEach(url => console.log(url));\n * ```\n */\n async map(url: string, options?: MapOptions): Promise<MapResponse> {\n return map(this.http, url, options);\n }\n\n // ==========================================================================\n // Search Methods\n // ==========================================================================\n\n /**\n * Search the web and optionally scrape results\n *\n * @param query - Search query\n * @param options - Search options\n * @returns Search results with optional scraped content\n *\n * @example Basic search\n * ```typescript\n * const results = await client.search('best restaurants in NYC', {\n * limit: 10,\n * lang: 'en',\n * country: 'us'\n * });\n *\n * results.data.forEach(r => {\n * console.log(`${r.title}: ${r.url}`);\n * });\n * ```\n *\n * @example Search with scraping\n * ```typescript\n * 
const results = await client.search('best laptops 2024', {\n * limit: 5,\n * scrapeOptions: {\n * formats: ['markdown']\n * },\n * engine: 'smart'\n * });\n *\n * results.data.forEach(r => {\n * console.log(r.title);\n * console.log(r.markdown?.substring(0, 200));\n * });\n * ```\n *\n * @example Search with LLM extraction\n * ```typescript\n * import { z } from 'zod';\n *\n * const results = await client.search('iPhone 15 Pro reviews', {\n * limit: 5,\n * scrapeOptions: { formats: ['markdown'] },\n * extract: {\n * schema: z.object({\n * pros: z.array(z.string()),\n * cons: z.array(z.string()),\n * rating: z.number()\n * }),\n * systemPrompt: 'Extract review summary from the content'\n * }\n * });\n *\n * console.log(results.extract?.data);\n * ```\n */\n async search(query: string, options?: SearchOptions): Promise<SearchResponse> {\n return search(this.http, query, options);\n }\n\n // ==========================================================================\n // Usage & Monitoring Methods\n // ==========================================================================\n\n /**\n * Get current concurrency usage\n *\n * @returns Current and max concurrency\n *\n * @example\n * ```typescript\n * const { concurrency, maxConcurrency } = await client.getConcurrency();\n * console.log(`Using ${concurrency}/${maxConcurrency} concurrent requests`);\n * ```\n */\n async getConcurrency(): Promise<ConcurrencyInfo> {\n return getConcurrency(this.http);\n }\n\n /**\n * Get current credit usage\n *\n * @returns Credit usage information\n *\n * @example\n * ```typescript\n * const credits = await client.getCreditUsage();\n * console.log(`Remaining credits: ${credits.remainingCredits}`);\n * ```\n */\n async getCreditUsage(): Promise<CreditUsage> {\n return getCreditUsage(this.http);\n }\n\n /**\n * Get current token usage (for LLM extraction)\n *\n * @returns Token usage information\n *\n * @example\n * ```typescript\n * const tokens = await client.getTokenUsage();\n * console.log(`Remaining tokens: ${tokens.remainingTokens}`);\n * ```\n */\n async getTokenUsage(): Promise<TokenUsage> {\n return getTokenUsage(this.http);\n }\n\n /**\n * Get queue status information\n *\n * @returns Queue status metrics\n *\n * @example\n * ```typescript\n * const queue = await client.getQueueStatus();\n * console.log(`Jobs in queue: ${queue.jobsInQueue}`);\n * console.log(`Active: ${queue.activeJobsInQueue}, Waiting: ${queue.waitingJobsInQueue}`);\n * ```\n */\n async getQueueStatus(): Promise<QueueStatus> {\n return getQueueStatus(this.http);\n }\n}\n\nexport default 
CrawlGateClient;\n"],"mappings":";AAAA,OAAO,WAIA;;;ACDA,IAAM,iBAAN,MAAM,wBAAuB,MAAM;AAAA;AAAA;AAAA;AAAA,EAIxB;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,EAEhB,YACE,SACA,YACA,MACA,SACA;AACA,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,UAAU;AAGf,QAAI,MAAM,mBAAmB;AAC3B,YAAM,kBAAkB,MAAM,eAAc;AAAA,IAC9C;AAAA,EACF;AACF;AAKO,IAAM,sBAAN,cAAkC,eAAe;AAAA,EACtD,YAAY,UAAkB,mBAAmB;AAC/C,UAAM,SAAS,KAAK,sBAAsB;AAC1C,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA,EAClD,YAAY,SAAiB,SAAmB;AAC9C,UAAM,SAAS,KAAK,oBAAoB,OAAO;AAC/C,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIlC;AAAA;AAAA;AAAA;AAAA,EAKA;AAAA,EAEhB,YAAY,OAAe,gBAAwB;AACjD;AAAA,MACE,aAAa,KAAK,4BAA4B,cAAc;AAAA,MAC5D;AAAA,MACA;AAAA,IACF;AACA,SAAK,OAAO;AACZ,SAAK,QAAQ;AACb,SAAK,iBAAiB;AAAA,EACxB;AACF;AAKO,IAAM,0BAAN,cAAsC,eAAe;AAAA,EAC1D,YAAY,UAAkB,mCAAmC;AAC/D,UAAM,SAAS,KAAK,qBAAqB;AACzC,SAAK,OAAO;AAAA,EACd;AACF;AAKO,IAAM,iBAAN,cAA6B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIjC;AAAA,EAEhB,YAAY,UAAkB,uBAAuB,YAAqB;AACxE,UAAM,SAAS,KAAK,qBAAqB;AACzC,SAAK,OAAO;AACZ,SAAK,aAAa;AAAA,EACpB;AACF;AAKO,IAAM,kBAAN,cAA8B,eAAe;AAAA;AAAA;AAAA;AAAA,EAIlC;AAAA,EAEhB,YAAY,SAAiB,UAAmB;AAC9C,UAAM,SAAS,QAAW,kBAAkB;AAC5C,SAAK,OAAO;AACZ,SAAK,WAAW;AAAA,EAClB;AACF;AAKO,SAAS,cACd,QACA,MACO;AACP,QAAM,UAAU,KAAK,SAAS,KAAK,WAAW;AAE9C,UAAQ,QAAQ;AAAA,IACd,KAAK;AACH,YAAM,IAAI,gBAAgB,SAAS,KAAK,OAAO;AAAA,IACjD,KAAK;AACH,YAAM,IAAI,oBAAoB,OAAO;AAAA,IACvC,KAAK;AACH,YAAM,IAAI,eAAe,OAAO;AAAA,IAClC,KAAK;AAAA,IACL,KAAK;AACH,YAAM,IAAI,wBAAwB,OAAO;AAAA,IAC3C;AACE,YAAM,IAAI,eAAe,SAAS,QAAQ,QAAW,KAAK,OAAO;AAAA,EACrE;AACF;;;AD5GO,IAAM,aAAN,MAAiB;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,SAA4B;AACtC,SAAK,SAAS,QAAQ;AACtB,SAAK,SAAS,QAAQ,OAAO,QAAQ,OAAO,EAAE;AAC9C,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,gBAAgB,QAAQ,iBAAiB;AAE9C,SAAK,WAAW,MAAM,OAAO;AAAA,MAC3B,SAAS,KAAK;AAAA,MACd,SAAS,QAAQ,aAAa;AAAA,MAC9B,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,aAAa,KAAK;AAAA,MACpB;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,YAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,YAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKQ,MAAM,SAAgC;AAC5C,WAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,UAAU,GAAI,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA,EAKQ,iBAAiB,QAA0B;AACjD,WAAO,WAAW,OAAO,WAAW,OAAO,WAAW;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,QACZ,QAC2B;AAC3B,QAAI;AAEJ,aAAS,UAAU,GAAG,UAAU,KAAK,YAAY,WAAW;AAC1D,UAAI;AAEF,YACE,OAAO,UACP,CAAC,QAAQ,OAAO,OAAO,EAAE,SAAS,OAAO,OAAO,YAAY,CAAC,GAC7D;AACA,gBAAM,OAAQ,OAAO,QAAQ,CAAC;AAC9B,iBAAO,OAAO,EAAE,GAAG,MAAM,QAAQ,gBAAgB;AAGjD,cAAI,OAAO,KAAK,YAAY,UAAU;AACpC,mBAAO,UAAU,KAAK,UAAU;AAAA,UAClC;AAAA,QACF;AAEA,cAAM,WAAW,MAAM,KAAK,SAAS,QAAW,MAAM;AAGtD,YACE,KAAK,iBAAiB,SAAS,MAAM,KACrC,UAAU,KAAK,aAAa,GAC5B;AACA,gBAAM,KAAK,MAAM,KAAK,gBAAgB,KAAK,IAAI,GAAG,OAAO,CAAC;AAC1D;AAAA,QACF;AAEA,eAAO;AAAA,MACT,SAAS,KAAc;AACrB,cAAM,aAAa;AAKnB,oBACE,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,YAAY,OAAO,CAAC;AACpE,cAAM,SAAS,YAAY,UAAU;AAGrC,YAAI,KAAK,iBAAiB,MAAM,KAAK,UAAU,KAAK,aAAa,GAAG;AAClE,gBAAM,KAAK,MAAM,KAAK,gBAAgB,KAAK,IAAI,GAAG,OAAO,CAAC;AAC1D;AAAA,QACF;AAGA,YAAI,YAAY,UAAU;AACxB;AAAA,YACE,WAAW,SAAS;AAAA,YACpB,WAAW,SAAS;AAAA,UAKtB;AAAA,QACF;AAEA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,UAAM,aAAa,IAAI,eAAe,8BAA8B;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,KACJ,UACA,MACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL,MAAM;AAAA,MACN;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,IACJ,UACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAAA,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,OACJ,UACA,SAC2B;AAC3B,WAAO,KAAK,QAAW;AAA
A,MACrB,QAAQ;AAAA,MACR,KAAK;AAAA,MACL;AAAA,IACF,CAAC;AAAA,EACH;AACF;;;AE5MA,SAAS,uBAAuB;AAQhC,SAAS,cACP,QACyB;AAEzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,WAAO,gBAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,oBAAoB,QAAW;AAC1C,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,SAAS,aAAa;AACxB,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,SAAS,YAAY,QAAW;AAClC,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,YAAY,QAAW;AAClC,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQ,cAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,KACA,SACmB;AACnB,QAAM,OAAO,gBAAgB,KAAK,OAAO;AAEzC,QAAM,WAAW,MAAM,KAAK,KAAgC,cAAc,IAAI;AAG9E,QAAM,OAAO,SAAS;AAGtB,MAAI,KAAK,YAAY,OAAO;AAC1B,UAAM,IAAI;AAAA,MACP,KAAK,SAAoB;AAAA,MAC1B;AAAA,MACC,KAAK,QAAmB;AAAA,IAC3B;AAAA,EACF;AAGA,MAAI,KAAK,YAAY,QAAQ,KAAK,MAAM;AACtC,WAAO,KAAK;AAAA,EACd;AAGA,MAAI,KAAK,OAAO,KAAK,YAAY,KAAK,MAAM;AAC1C,WAAO;AAAA,EACT;AAEA,QAAM,IAAI,eAAe,gCAAgC,QAAW,SAAS;AAC/E;;;ACjHA,SAAS,eACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,oBAAoB,QAAW;AAC1C,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,SAAS,aAAa;AACxB,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,WACpB,MACA,KACA,SACwB;AACxB,QAAM,OAAO,eAAe,KAAK,OAAO;AAExC,QAAM,WAAW,MAAM,KAAK,KAAoB,aAAa,IAAI;AAEjE,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,OAAO,SAAS,KAAK;AAAA,IACrB,QAAQ,SAAS,KAAK,UAAU;AAAA,IAChC,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;AASA,eAAsB,eACpB,MACA,OACmB;AACnB,QAAM,WAAW,MAAM,KAAK,IAAc,aAAa,KAAK,EAAE;AAE9D,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,IACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA,IACtC,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,YACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,aAAa,KAAK;AAAA,EACpB;AAEA,SAAO,SAAS,KAAK,YAAY;AACnC;AAKA,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAUA,eAAsB,MACpB,MACA,KACA,SACmB;AACnB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS,WAAW;AAGpC,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,WAAW,MAAM,KAAK,OAAO;AAEzD,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU;AAG5B,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,eAAe,MAAM,KAAK;AAG/C,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,KAAK,IAAI,IAAI,YAAY,WAAW;AACtC,YAAM,IAAI,gBAAgB,OAAO,OAAO;AAAA,IAC1C;AAGA,UAAM,MAAM,YAAY;AAAA,EAC1B;AACF;AASA,eAAsB,eACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,aAAa,KAAK,SAAS;AAE9B,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,SAAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAA
W,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;;;AC9MA,SAAS,aACP,KACA,SACyB;AACzB,QAAM,OAAgC,EAAE,IAAI;AAE5C,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,OAAO;AAClB,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,IACpB,MACA,KACA,SACsB;AACtB,QAAM,OAAO,aAAa,KAAK,OAAO;AAEtC,QAAM,WAAW,MAAM,KAAK,KAAkB,WAAW,IAAI;AAE7D,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,OAAO,SAAS,KAAK,SAAS,CAAC;AAAA,IAC/B,OAAO,SAAS,KAAK,SAAS,SAAS,KAAK,OAAO,UAAU;AAAA,IAC7D,QAAQ,SAAS,KAAK;AAAA,EACxB;AACF;;;AC1DA,SAAS,mBAAAA,wBAAuB;AAQhC,SAASC,eACP,QACyB;AACzB,MAAI,UAAU,OAAO,WAAW,YAAY,UAAU,QAAQ;AAC5D,WAAOC,iBAAgB,MAAoB;AAAA,EAC7C;AACA,SAAO;AACT;AAKA,SAAS,gBACP,OACA,SACyB;AACzB,QAAM,OAAgC,EAAE,MAAM;AAE9C,MAAI,SAAS,UAAU,QAAW;AAChC,SAAK,QAAQ,QAAQ;AAAA,EACvB;AAEA,MAAI,SAAS,MAAM;AACjB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,eAAe;AAC1B,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,MAAI,SAAS,QAAQ;AACnB,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAGA,MAAI,SAAS,SAAS;AACpB,SAAK,UAAU;AAAA,MACb,QAAQD,eAAc,QAAQ,QAAQ,MAAM;AAAA,MAC5C,cAAc,QAAQ,QAAQ;AAAA,MAC9B,UAAU,QAAQ,QAAQ;AAAA,MAC1B,gBAAgB,QAAQ,QAAQ;AAAA,IAClC;AAGA,WAAO,KAAK,KAAK,OAAkC,EAAE,QAAQ,CAAC,QAAQ;AACpE,UAAK,KAAK,QAAoC,GAAG,MAAM,QAAW;AAChE,eAAQ,KAAK,QAAoC,GAAG;AAAA,MACtD;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AACT;AAUA,eAAsB,OACpB,MACA,OACA,SACyB;AACzB,QAAM,OAAO,gBAAgB,OAAO,OAAO;AAE3C,QAAM,WAAW,MAAM,KAAK,KAAqB,cAAc,IAAI;AAEnE,MAAI,CAAC,SAAS,KAAK,SAAS;AAC1B,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,cAAc,SAAS,KAAK;AAAA,IAC5B,YAAY,SAAS,KAAK;AAAA,IAC1B,SAAS,SAAS,KAAK;AAAA,EACzB;AACF;;;AC9FA,SAAS,eACP,MACA,SACyB;AACzB,QAAM,OAAgC,EAAE,KAAK;AAE7C,MAAI,SAAS,SAAS;AAEpB,UAAM,aAAa,QAAQ;AAC3B,QAAI,WAAW,OAAQ,MAAK,SAAS,WAAW;AAChD,QAAI,WAAW,QAAS,MAAK,UAAU,WAAW;AAClD,QAAI,WAAW,oBAAoB,OAAW,MAAK,kBAAkB,WAAW;AAChF,QAAI,WAAW,YAAa,MAAK,cAAc,WAAW;AAC1D,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,YAAY,OAAW,MAAK,UAAU,WAAW;AAChE,QAAI,WAAW,MAAO,MAAK,QAAQ,WAAW;AAAA,EAChD;AAEA,MAAI,SAAS,WAAW,MAAM;AAC5B,SAAK,UAAU,QAAQ;AAAA,EACzB;AAEA,MAAI,SAAS,cAAc,MAAM;AAC/B,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,SAAS,qBAAqB,MAAM;AACtC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,SAAS,kBAAkB,MAAM;AACnC,SAAK,iBAAiB,QAAQ;AAAA,EAChC;AAEA,MAAI,SAAS,WAAW;AACtB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,SAAO;AACT;AAUA,eAAsB,iBACpB,MACA,MACA,SAC8B;AAC9B,MAAI,CAAC,MAAM,QAAQ,IAAI,KAAK,KAAK,WAAW,GAAG;AAC7C,UAAM,IAAI,eAAe,8BAA8B,KAAK,kBAAkB;AAAA,EAChF;AAEA,QAAM,OAAO,eAAe,MAAM,OAAO;AAEzC,QAAM,UAAkC,CAAC;AACzC,MAAI,SAAS,gBAAgB;AAC3B,YAAQ,iBAAiB,IAAI,QAAQ;AAAA,EACvC;AAEA,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B;AAAA,IACA;AAAA,IACA,OAAO,KAAK,OAAO,EAAE,SAAS,IAAI,UAAU;AAAA,EAC9C;AAEA,MAAI,CAAC,SAAS,KAAK,WAAW,CAAC,SAAS,KAAK,IAAI;AAC/C,UAAM,IAAI;AAAA,MACR,SAAS,KAAK,SAAS;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT,IAAI,SAAS,KAAK;AAAA,IAClB,KAAK,SAAS,KAAK;AAAA,IACnB,aAAa,SAAS,KAAK;AAAA,EAC7B;AACF;AASA,eAAsB,qBACpB,MACA,OACyB;AACzB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO;AAAA,IACL,IAAI,SAAS,KAAK,MAAM;AAAA,IACxB,QAAQ,SAAS,KAAK;AAAA,IACtB,OAAO,SAAS,KAAK,SAAS;AAAA,IAC9B,WAAW,SAAS,KAAK,aAAa;AAAA
,IACtC,aAAa,SAAS,KAAK;AAAA,IAC3B,WAAW,SAAS,KAAK;AAAA,IACzB,MAAM,SAAS,KAAK,QAAQ;AAAA,IAC5B,MAAM,SAAS,KAAK,QAAQ,CAAC;AAAA,IAC7B,OAAO,SAAS,KAAK;AAAA,EACvB;AACF;AASA,eAAsB,kBACpB,MACA,OACkB;AAClB,QAAM,WAAW,MAAM,KAAK;AAAA,IAC1B,oBAAoB,KAAK;AAAA,EAC3B;AAEA,SAAO,SAAS,KAAK,WAAW,eAAe,SAAS,KAAK,YAAY;AAC3E;AASA,eAAsB,qBACpB,MACA,OAC8B;AAC9B,QAAM,WAAW,MAAM,KAAK,IAKzB,oBAAoB,KAAK,SAAS;AAErC,QAAM,UAAU,SAAS,KAAK,QAAQ,SAAS;AAC/C,SAAO;AAAA,IACL,SAAS,QAAQ,UAAU,CAAC,GAAG,IAAI,CAAC,OAAO;AAAA,MACzC,IAAI,EAAE,MAAM;AAAA,MACZ,WAAW,EAAE;AAAA,MACb,KAAK,EAAE,OAAO;AAAA,MACd,MAAM,EAAE;AAAA,MACR,OAAO,EAAE,SAAS,EAAE,WAAW;AAAA,IACjC,EAAE;AAAA,IACF,eAAe,QAAQ,iBAAiB,CAAC;AAAA,EAC3C;AACF;AAKA,SAASE,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,uBACb,MACA,OACA,eAAuB,KACvB,SACyB;AACzB,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,qBAAqB,MAAM,KAAK;AAGrD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AAUA,eAAsB,YACpB,MACA,MACA,SACyB;AACzB,QAAM,eAAe,SAAS,gBAAgB;AAC9C,QAAM,UAAU,SAAS;AAGzB,QAAM,EAAE,IAAI,MAAM,IAAI,MAAM,iBAAiB,MAAM,MAAM,OAAO;AAGhE,SAAO,uBAAuB,MAAM,OAAO,cAAc,OAAO;AAClE;;;ACjQA,SAAS,mBAAAC,wBAAuB;AAQhC,SAAS,YAAY,OAAqC;AACxD,SACE,UAAU,QACV,OAAO,UAAU,YACjB,UAAU,UACT,OAAQ,MAAc,cAAc,cAAc,OAAQ,MAAc,UAAU;AAEvF;AAKA,SAASC,eACP,QACyB;AACzB,MAAI,YAAY,MAAM,GAAG;AACvB,WAAOC,iBAAgB,MAAM;AAAA,EAC/B;AACA,SAAO;AACT;AAKA,SAAS,iBACP,SACyB;AACzB,QAAM,OAAgC,CAAC;AAEvC,MAAI,QAAQ,MAAM;AAChB,SAAK,OAAO,QAAQ;AAAA,EACtB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAAS,QAAQ;AAAA,EACxB;AAEA,MAAI,QAAQ,UAAU,MAAM;AAC1B,SAAK,SAASD,eAAc,QAAQ,MAAM;AAAA,EAC5C;AAEA,MAAI,QAAQ,gBAAgB,MAAM;AAChC,SAAK,eAAe,QAAQ;AAAA,EAC9B;AAEA,MAAI,QAAQ,sBAAsB,MAAM;AACtC,SAAK,qBAAqB,QAAQ;AAAA,EACpC;AAEA,MAAI,QAAQ,mBAAmB,MAAM;AACnC,SAAK,kBAAkB,QAAQ;AAAA,EACjC;AAEA,MAAI,QAAQ,eAAe,MAAM;AAC/B,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAEA,MAAI,QAAQ,qBAAqB,MAAM;AACrC,SAAK,oBAAoB,QAAQ;AAAA,EACnC;AAEA,MAAI,QAAQ,UAAU;AACpB,SAAK,WAAW,QAAQ;AAAA,EAC1B;AAEA,MAAI,QAAQ,WAAW;AACrB,SAAK,aAAa,QAAQ;AAAA,EAC5B;AAEA,MAAI,QAAQ,eAAe;AACzB,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAEA,SAAO;AACT;AASA,eAAsB,aACpB,MACA,SAC0B;AAC1B,QAAM,OAAO,iBAAiB,OAAO;AAErC,QAAM,WAAW,MAAM,KAAK,KAAsB,eAAe,IAAI;AAErE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AASA,eAAsB,iBACpB,MACA,OAC0B;AAC1B,QAAM,WAAW,MAAM,KAAK,IAAqB,eAAe,KAAK,EAAE;AAEvE,MAAI,SAAS,KAAK,YAAY,SAAS,SAAS,KAAK,OAAO;AAC1D,UAAM,IAAI;AAAA,MACR,SAAS,KAAK;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO,SAAS;AAClB;AAKA,SAASE,OAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAWA,eAAe,yBACb,MACA,OACA,eAAuB,KACvB,SAC0B;AAC1B,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,YAAY,UAAU,UAAU,MAAO;AAE7C,SAAO,MAAM;AACX,UAAM,SAAS,MAAM,iBAAiB,MAAM,KAAK;AAGjD,QAAI,OAAO,WAAW,aAAa;AACjC,aAAO;AAAA,IACT;AAEA,QAAI,OAAO,WAAW,UAAU;AAC9B,YAAM,IAAI;AAAA,QACR,OAAO,SAAS;AAAA,QAChB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,QAAI,OAAO,WAAW,aAAa;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,aAAa,KAAK,IAAI,IAAI,YAAY,WAAW;AACnD,YAAM,IAAI,gBAAgB,OAAO,OAAQ;AAAA,IAC3C;AAGA,UAAMA,OAAM,KAAK,IAAI,KAAM,YAAY,CAAC;AAAA,EAC1C;AACF;AA8BA,eAAsB,QACpB,MACA,SAC0B;AAC1B,QAAM,eAAe,QAAQ,gBAAgB;AAC7C,QAAM,UAAU,QAAQ;AAGxB,QAAM,UAA
U,MAAM,aAAa,MAAM,OAAO;AAGhD,MAAI,CAAC,QAAQ,IAAI;AACf,WAAO;AAAA,EACT;AAGA,MAAI,QAAQ,WAAW,aAAa;AAClC,WAAO;AAAA,EACT;AAGA,SAAO,yBAAyB,MAAM,QAAQ,IAAI,cAAc,OAAO;AACzE;;;AC3OA,eAAsB,eAAe,MAA4C;AAC/E,QAAM,WAAW,MAAM,KAAK,IAAqB,iBAAiB;AAElE,SAAO;AAAA,IACL,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,EAClD;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,aAAa;AAE1D,SAAO;AAAA,IACL,kBAAkB,SAAS,KAAK,oBAAoB;AAAA,IACpD,aAAa,SAAS,KAAK;AAAA,IAC3B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,cAAc,MAAuC;AACzE,QAAM,WAAW,MAAM,KAAK,IAAgB,YAAY;AAExD,SAAO;AAAA,IACL,iBAAiB,SAAS,KAAK,mBAAmB;AAAA,IAClD,YAAY,SAAS,KAAK;AAAA,IAC1B,oBAAoB,SAAS,KAAK;AAAA,IAClC,kBAAkB,SAAS,KAAK;AAAA,EAClC;AACF;AAQA,eAAsB,eAAe,MAAwC;AAC3E,QAAM,WAAW,MAAM,KAAK,IAAiB,WAAW;AAExD,SAAO;AAAA,IACL,SAAS,SAAS,KAAK,WAAW;AAAA,IAClC,aAAa,SAAS,KAAK,eAAe;AAAA,IAC1C,mBAAmB,SAAS,KAAK,qBAAqB;AAAA,IACtD,oBAAoB,SAAS,KAAK,sBAAsB;AAAA,IACxD,gBAAgB,SAAS,KAAK,kBAAkB;AAAA,IAChD,mBAAmB,SAAS,KAAK;AAAA,EACnC;AACF;;;ACYO,IAAM,kBAAN,MAAsB;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQjB,YAAY,UAAkC,CAAC,GAAG;AAChD,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI,qBAAqB;AAClE,UAAM,UACJ,QAAQ,UACR,QAAQ,IAAI,qBACZ,4BACA,QAAQ,OAAO,EAAE;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,OAAO,IAAI,WAAW;AAAA,MACzB;AAAA,MACA;AAAA,MACA,WAAW,QAAQ;AAAA,MACnB,YAAY,QAAQ;AAAA,MACpB,eAAe,QAAQ;AAAA,IACzB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CA,MAAM,OAAO,KAAa,SAA4C;AACpE,WAAO,OAAO,KAAK,MAAM,KAAK,OAAO;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4BA,MAAM,iBACJ,MACA,SAC8B;AAC9B,WAAO,iBAAiB,KAAK,MAAM,MAAM,OAAO;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAAwC;AACjE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,kBAAkB,OAAiC;AACvD,WAAO,kBAAkB,KAAK,MAAM,KAAK;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,qBAAqB,OAA6C;AACtE,WAAO,qBAAqB,KAAK,MAAM,KAAK;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,YACJ,MACA,SACyB;AACzB,WAAO,YAAY,KAAK,MAAM,MAAM,OAAO;AAAA,EAC7C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+BA,MAAM,WAAW,KAAa,SAAgD;AAC5E,WAAO,WAAW,KAAK,MAAM,KAAK,OAAO;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAAkC;AACrD,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,YAAY,OAAiC;AACjD,WAAO,YAAY,KAAK,MAAM,KAAK;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,eAAe,OAA6C;AAChE,WAAO,eAAe,KAAK,MAAM,KAAK;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,MAAM,KAAa,SAA2C;AAClE,WAAO,MAAM,KAAK,MAAM,KAAK,OAAO;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA6BA,MAAM,aAAa,SAA0D;AAC3E,WAAO,aAAa,KAAK,MAAM,OAAO;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBAAiB,OAAyC;AAC9D,WAAO,iBAAiB,KAAK,MAAM,KAAK;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA
AA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuCA,MAAM,QAAQ,SAA0D;AACtE,WAAO,QAAQ,KAAK,MAAM,OAAO;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuBA,MAAM,IAAI,KAAa,SAA4C;AACjE,WAAO,IAAI,KAAK,MAAM,KAAK,OAAO;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,MAAM,OAAO,OAAe,SAAkD;AAC5E,WAAO,OAAO,KAAK,MAAM,OAAO,OAAO;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,iBAA2C;AAC/C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,gBAAqC;AACzC,WAAO,cAAc,KAAK,IAAI;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,MAAM,iBAAuC;AAC3C,WAAO,eAAe,KAAK,IAAI;AAAA,EACjC;AACF;","names":["zodToJsonSchema","convertSchema","zodToJsonSchema","sleep","zodToJsonSchema","convertSchema","zodToJsonSchema","sleep"]}
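
The `sourcesContent` embedded in the map above carries the updated src/methods/scrape.ts: as of 1.0.1, `scrape()` unwraps a `{ success: true, data }` envelope, passes a bare document (anything exposing `url`, `markdown`, or `html`) through unchanged, and raises the server-supplied error code rather than a hard-coded `SCRAPE_ERROR`. A minimal caller-side sketch of that behavior (the API key, URL, and `demo` wrapper are placeholders; `err.code` assumes the public `code` field that `CrawlGateError`'s constructor sets):

```typescript
import { CrawlGateClient, CrawlGateError } from "@crawlgate/sdk";

async function demo(): Promise<void> {
  // Placeholder credentials and URL, mirroring the JSDoc examples in the map.
  const client = new CrawlGateClient({ apiKey: "sk_live_..." });

  try {
    // Resolves for both response shapes: a wrapped { success: true, data }
    // envelope yields the inner document, and a bare document carrying
    // url/markdown/html fields is returned as-is.
    const doc = await client.scrape("https://example.com", {
      formats: ["markdown"],
    });
    console.log(doc.markdown);
  } catch (err) {
    if (err instanceof CrawlGateError) {
      // As of 1.0.1, the code reflects the API's own error code when one is
      // present, falling back to "SCRAPE_ERROR" / "NO_DATA" as before.
      console.error(err.code, err.message);
    }
  }
}
```
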
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@crawlgate/sdk",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "Official JavaScript/TypeScript SDK for CrawlGate Search Engine API",
   "main": "dist/index.js",
   "module": "dist/index.mjs",
@@ -35,7 +35,7 @@
   "homepage": "https://crawlgate.io",
   "repository": {
     "type": "git",
-    "url": "https://github.com/crawlgate/sdk"
+    "url": "git+https://github.com/crawlgate/sdk.git"
   },
   "bugs": {
     "url": "https://github.com/crawlgate/sdk/issues"