@crawlee/jsdom 3.13.3-beta.12 → 3.13.3-beta.13

@@ -39,7 +39,7 @@ JSONData extends Dictionary = any> extends InternalHttpCrawlingContext<UserData,
      */
     waitForSelector(selector: string, timeoutMs?: number): Promise<void>;
     /**
-     * Returns Cheerio handle, allowing to work with the data same way as with {@apilink CheerioCrawler}.
+     * Returns Cheerio handle, allowing to work with the data same way as with {@link CheerioCrawler}.
      * When provided with the `selector` argument, it will first look for the selector with a 5s timeout.
      *
      * **Example usage:**
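For orientation, a minimal sketch of the two context helpers this doc comment covers: `waitForSelector` from the signature above and, assuming crawlee's usual naming, the `parseWithCheerio` method that returns the Cheerio handle. The selector, timeout, and URL are invented for illustration:

```ts
import { JSDOMCrawler } from '@crawlee/jsdom';

const crawler = new JSDOMCrawler({
    async requestHandler({ request, waitForSelector, parseWithCheerio, log }) {
        // Wait up to 10 s for late-rendered content ('.price' is hypothetical).
        await waitForSelector('.price', 10_000);

        // Cheerio handle over the current DOM, usable the same way
        // as with CheerioCrawler.
        const $ = await parseWithCheerio();
        log.info(`${request.url}: ${$('.price').text()}`);
    },
});

await crawler.run(['https://crawlee.dev']);
```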
@@ -186,9 +186,9 @@ interface EnqueueLinksInternalOptions {
 // @ts-ignore optional peer dependency or compatibility with es2022
 export declare function domCrawlerEnqueueLinks({ options, window, requestQueue, robotsTxtFile, onSkippedRequest, originalRequestUrl, finalRequestUrl, }: EnqueueLinksInternalOptions): Promise<import("@crawlee/types").BatchAddRequestsResult>;
 /**
- * Creates new {@apilink Router} instance that works based on request labels.
- * This instance can then serve as a `requestHandler` of your {@apilink JSDOMCrawler}.
- * Defaults to the {@apilink JSDOMCrawlingContext}.
+ * Creates new {@link Router} instance that works based on request labels.
+ * This instance can then serve as a `requestHandler` of your {@link JSDOMCrawler}.
+ * Defaults to the {@link JSDOMCrawlingContext}.
  *
  * > Serves as a shortcut for using `Router.create<JSDOMCrawlingContext>()`.
  *
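This doc comment belongs to the package's router factory, exported as `createJSDOMRouter`. A short sketch of the label-based routing it describes, with the label and selector values invented for illustration:

```ts
import { createJSDOMRouter, JSDOMCrawler } from '@crawlee/jsdom';

// Shortcut for Router.create<JSDOMCrawlingContext>().
const router = createJSDOMRouter();

// Runs for requests enqueued with { label: 'DETAIL' } (label is illustrative).
router.addHandler('DETAIL', async ({ request, window, log }) => {
    log.info(`${request.url}: ${window.document.title}`);
});

// Fallback for requests without a matching label.
router.addDefaultHandler(async ({ enqueueLinks }) => {
    await enqueueLinks({ selector: 'a.detail', label: 'DETAIL' });
});

// The router itself serves as the crawler's requestHandler.
const crawler = new JSDOMCrawler({ requestHandler: router });
```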
@@ -19,10 +19,10 @@ const utilities_1 = require("@apify/utilities");
  *
  * Since `JSDOMCrawler` uses raw HTTP requests to download web pages,
  * it is very fast and efficient on data bandwidth. However, if the target website requires JavaScript
- * to display the content, you might need to use {@apilink PuppeteerCrawler} or {@apilink PlaywrightCrawler} instead,
+ * to display the content, you might need to use {@link PuppeteerCrawler} or {@link PlaywrightCrawler} instead,
  * because it loads the pages using full-featured headless Chrome browser.
  *
- * Alternatively, you can use {@apilink JSDOMCrawlerOptions.runScripts} to run website scripts in Node.
+ * Alternatively, you can use {@link JSDOMCrawlerOptions.runScripts} to run website scripts in Node.
  * JSDOM does not implement all the standards, so websites can break.
  *
  * **Limitation**:
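A rough sketch of the `runScripts` escape hatch mentioned above; the option name comes straight from the doc comment, while the selector and URL are hypothetical:

```ts
import { JSDOMCrawler } from '@crawlee/jsdom';

const crawler = new JSDOMCrawler({
    // Let JSDOM execute the page's own client-side scripts in Node.
    // Pages depending on standards JSDOM lacks may still break;
    // PuppeteerCrawler/PlaywrightCrawler remain the robust fallback.
    runScripts: true,
    async requestHandler({ window, log }) {
        // '#app' is a hypothetical element populated by the page's scripts.
        log.info(window.document.querySelector('#app')?.textContent ?? '(empty)');
    },
});

await crawler.run(['https://crawlee.dev']);
```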
@@ -30,18 +30,18 @@ const utilities_1 = require("@apify/utilities");
  *
  * `JSDOMCrawler` downloads each URL using a plain HTTP request,
  * parses the HTML content using [JSDOM](https://www.npmjs.com/package/jsdom)
- * and then invokes the user-provided {@apilink JSDOMCrawlerOptions.requestHandler} to extract page data
+ * and then invokes the user-provided {@link JSDOMCrawlerOptions.requestHandler} to extract page data
  * using the `window` object.
  *
- * The source URLs are represented using {@apilink Request} objects that are fed from
- * {@apilink RequestList} or {@apilink RequestQueue} instances provided by the {@apilink JSDOMCrawlerOptions.requestList}
- * or {@apilink JSDOMCrawlerOptions.requestQueue} constructor options, respectively.
+ * The source URLs are represented using {@link Request} objects that are fed from
+ * {@link RequestList} or {@link RequestQueue} instances provided by the {@link JSDOMCrawlerOptions.requestList}
+ * or {@link JSDOMCrawlerOptions.requestQueue} constructor options, respectively.
  *
- * If both {@apilink JSDOMCrawlerOptions.requestList} and {@apilink JSDOMCrawlerOptions.requestQueue} are used,
- * the instance first processes URLs from the {@apilink RequestList} and automatically enqueues all of them
- * to {@apilink RequestQueue} before it starts their processing. This ensures that a single URL is not crawled multiple times.
+ * If both {@link JSDOMCrawlerOptions.requestList} and {@link JSDOMCrawlerOptions.requestQueue} are used,
+ * the instance first processes URLs from the {@link RequestList} and automatically enqueues all of them
+ * to {@link RequestQueue} before it starts their processing. This ensures that a single URL is not crawled multiple times.
  *
- * The crawler finishes when there are no more {@apilink Request} objects to crawl.
+ * The crawler finishes when there are no more {@link Request} objects to crawl.
  *
  * We can use the `preNavigationHooks` to adjust `gotOptions`:
  *
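The `preNavigationHooks`/`gotOptions` example the comment refers to sits below this hunk; as a stand-in, a minimal sketch using the `(crawlingContext, gotOptions)` hook shape of crawlee's HTTP-based crawlers (the timeout value is arbitrary):

```ts
import { JSDOMCrawler } from '@crawlee/jsdom';

const crawler = new JSDOMCrawler({
    preNavigationHooks: [
        async (crawlingContext, gotOptions) => {
            // Adjust the underlying got-scraping request before navigation,
            // e.g. allow 30 s for a slow origin (arbitrary example value).
            gotOptions.timeout = { request: 30_000 };
        },
    ],
    async requestHandler({ request, log }) {
        log.info(`Loaded ${request.url}`);
    },
});
```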
@@ -56,15 +56,15 @@ const utilities_1 = require("@apify/utilities");
  * By default, `JSDOMCrawler` only processes web pages with the `text/html`
  * and `application/xhtml+xml` MIME content types (as reported by the `Content-Type` HTTP header),
  * and skips pages with other content types. If you want the crawler to process other content types,
- * use the {@apilink JSDOMCrawlerOptions.additionalMimeTypes} constructor option.
+ * use the {@link JSDOMCrawlerOptions.additionalMimeTypes} constructor option.
  * Beware that the parsing behavior differs for HTML, XML, JSON and other types of content.
- * For more details, see {@apilink JSDOMCrawlerOptions.requestHandler}.
+ * For more details, see {@link JSDOMCrawlerOptions.requestHandler}.
  *
  * New requests are only dispatched when there is enough free CPU and memory available,
- * using the functionality provided by the {@apilink AutoscaledPool} class.
- * All {@apilink AutoscaledPool} configuration options can be passed to the `autoscaledPoolOptions`
+ * using the functionality provided by the {@link AutoscaledPool} class.
+ * All {@link AutoscaledPool} configuration options can be passed to the `autoscaledPoolOptions`
  * parameter of the `CheerioCrawler` constructor. For user convenience, the `minConcurrency` and `maxConcurrency`
- * {@apilink AutoscaledPool} options are available directly in the `CheerioCrawler` constructor.
+ * {@link AutoscaledPool} options are available directly in the `CheerioCrawler` constructor.
  *
  * **Example usage:**
  *
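The three knobs covered by this hunk, `additionalMimeTypes`, `autoscaledPoolOptions`, and the `minConcurrency`/`maxConcurrency` shortcuts, combine roughly as in the sketch below; all values are illustrative, and the `contentType` context property is assumed from crawlee's HTTP crawling context:

```ts
import { JSDOMCrawler } from '@crawlee/jsdom';

const crawler = new JSDOMCrawler({
    // Also accept JSON responses besides text/html and application/xhtml+xml.
    additionalMimeTypes: ['application/json'],
    // AutoscaledPool shortcuts exposed directly on the crawler options.
    minConcurrency: 5,
    maxConcurrency: 50,
    // Any other AutoscaledPool option is passed through here.
    autoscaledPoolOptions: { desiredConcurrency: 10 },
    async requestHandler({ request, contentType }) {
        // Parsing behavior differs per content type, as the docs warn.
        console.log(`${request.url} served as ${contentType.type}`);
    },
});
```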
@@ -285,9 +285,9 @@ function extractUrlsFromWindow(window, selector, baseUrl) {
         .filter((href) => href !== undefined && href !== '');
 }
 /**
- * Creates new {@apilink Router} instance that works based on request labels.
- * This instance can then serve as a `requestHandler` of your {@apilink JSDOMCrawler}.
- * Defaults to the {@apilink JSDOMCrawlingContext}.
+ * Creates new {@link Router} instance that works based on request labels.
+ * This instance can then serve as a `requestHandler` of your {@link JSDOMCrawler}.
+ * Defaults to the {@link JSDOMCrawlingContext}.
  *
  * > Serves as a shortcut for using `Router.create<JSDOMCrawlingContext>()`.
  *
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@crawlee/jsdom",
-    "version": "3.13.3-beta.12",
+    "version": "3.13.3-beta.13",
     "description": "The scalable web crawling and scraping library for JavaScript/Node.js. Enables development of data extraction and web automation jobs (not only) with headless Chrome and Puppeteer.",
     "engines": {
         "node": ">=16.0.0"
@@ -55,9 +55,9 @@
     "dependencies": {
         "@apify/timeout": "^0.3.0",
         "@apify/utilities": "^2.7.10",
-        "@crawlee/http": "3.13.3-beta.12",
-        "@crawlee/types": "3.13.3-beta.12",
-        "@crawlee/utils": "3.13.3-beta.12",
+        "@crawlee/http": "3.13.3-beta.13",
+        "@crawlee/types": "3.13.3-beta.13",
+        "@crawlee/utils": "3.13.3-beta.13",
         "@types/jsdom": "^21.0.0",
         "cheerio": "1.0.0-rc.12",
         "jsdom": "^26.0.0",
@@ -71,5 +71,5 @@
             }
         }
     },
-    "gitHead": "fac14d5b8671e2984cff9dddb6031f6ed5b1a372"
+    "gitHead": "dbeb9038f0ef619689f9067563cddcb375207ab6"
 }