@crawlee/got-scraping-client 4.0.0-beta.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +153 -0
- package/index.d.ts +15 -0
- package/index.d.ts.map +1 -0
- package/index.js +54 -0
- package/index.js.map +1 -0
- package/package.json +61 -0
package/README.md
ADDED
<h1 align="center">
    <a href="https://crawlee.dev">
        <picture>
          <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/apify/crawlee/master/website/static/img/crawlee-dark.svg?sanitize=true">
          <img alt="Crawlee" src="https://raw.githubusercontent.com/apify/crawlee/master/website/static/img/crawlee-light.svg?sanitize=true" width="500">
        </picture>
    </a>
    <br>
    <small>A web scraping and browser automation library</small>
</h1>

<p align=center>
    <a href="https://trendshift.io/repositories/5179" target="_blank"><img src="https://trendshift.io/api/badge/repositories/5179" alt="apify%2Fcrawlee | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>

<p align=center>
    <a href="https://www.npmjs.com/package/@crawlee/core" rel="nofollow"><img src="https://img.shields.io/npm/v/@crawlee/core.svg" alt="NPM latest version" data-canonical-src="https://img.shields.io/npm/v/@crawlee/core/next.svg" style="max-width: 100%;"></a>
    <a href="https://www.npmjs.com/package/@crawlee/core" rel="nofollow"><img src="https://img.shields.io/npm/dm/@crawlee/core.svg" alt="Downloads" data-canonical-src="https://img.shields.io/npm/dm/@crawlee/core.svg" style="max-width: 100%;"></a>
    <a href="https://discord.gg/jyEM2PRvMU" rel="nofollow"><img src="https://img.shields.io/discord/801163717915574323?label=discord" alt="Chat on discord" data-canonical-src="https://img.shields.io/discord/801163717915574323?label=discord" style="max-width: 100%;"></a>
    <a href="https://github.com/apify/crawlee/actions/workflows/test-ci.yml"><img src="https://github.com/apify/crawlee/actions/workflows/test-ci.yml/badge.svg?branch=master" alt="Build Status" style="max-width: 100%;"></a>
</p>

Crawlee covers your crawling and scraping end-to-end and **helps you build reliable scrapers. Fast.**

Your crawlers will appear human-like and fly under the radar of modern bot protections even with the default configuration. Crawlee gives you the tools to crawl the web for links, scrape data, and store it to disk or cloud while staying configurable to suit your project's needs.

Crawlee is available as the [`crawlee`](https://www.npmjs.com/package/crawlee) NPM package.

> 👉 **View full documentation, guides and examples on the [Crawlee project website](https://crawlee.dev)** 👈

> Do you prefer 🐍 Python instead of JavaScript? [👉 Check out Crawlee for Python 👈](https://github.com/apify/crawlee-python).

## Installation

We recommend visiting the [Introduction tutorial](https://crawlee.dev/js/docs/introduction) in the Crawlee documentation for more information.

> Crawlee requires **Node.js 22 or higher**.

### With Crawlee CLI

The fastest way to try Crawlee out is to use the **Crawlee CLI** and choose the **Getting started example**. The CLI will install all the necessary dependencies and add boilerplate code for you to play with.

```bash
npx crawlee create my-crawler
```

```bash
cd my-crawler
npm start
```

### Manual installation

If you prefer adding Crawlee **into your own project**, try the example below. Because it uses `PlaywrightCrawler`, we also need to install [Playwright](https://playwright.dev). It's not bundled with Crawlee to reduce install size.

```bash
npm install crawlee playwright
```

```js
import { PlaywrightCrawler, Dataset } from 'crawlee';

// PlaywrightCrawler crawls the web using a headless
// browser controlled by the Playwright library.
const crawler = new PlaywrightCrawler({
    // Use the requestHandler to process each of the crawled pages.
    async requestHandler({ request, page, enqueueLinks, log }) {
        const title = await page.title();
        log.info(`Title of ${request.loadedUrl} is '${title}'`);

        // Save results as JSON to ./storage/datasets/default
        await Dataset.pushData({ title, url: request.loadedUrl });

        // Extract links from the current page
        // and add them to the crawling queue.
        await enqueueLinks();
    },
    // Uncomment this option to see the browser window.
    // headless: false,
});

// Add first URL to the queue and start the crawl.
await crawler.run(['https://crawlee.dev']);
```

By default, Crawlee stores data to `./storage` in the current working directory. You can override this directory via Crawlee configuration. For details, see the [Configuration guide](https://crawlee.dev/js/docs/guides/configuration), [Request storage](https://crawlee.dev/js/docs/guides/request-storage) and [Result storage](https://crawlee.dev/js/docs/guides/result-storage).
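
A minimal sketch of overriding the storage directory programmatically, assuming the default `@crawlee/memory-storage` client is in use; the `./my-storage` path is a placeholder. Setting the `CRAWLEE_STORAGE_DIR` environment variable before the process starts achieves the same effect.

```ts
import { Configuration } from 'crawlee';

// Forward a custom data directory to the default storage client.
// './my-storage' is only a placeholder path.
Configuration.getGlobalConfig().set('storageClientOptions', {
    localDataDirectory: './my-storage',
});
```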

### Installing pre-release versions

We provide automated beta builds for every merged code change in Crawlee. You can find them in the npm [list of releases](https://www.npmjs.com/package/crawlee?activeTab=versions). If you want to test new features or bug fixes before we release them, feel free to install a beta build like this:

```bash
npm install crawlee@3.12.3-beta.13
```

If you also use the [Apify SDK](https://github.com/apify/apify-sdk-js), you need to specify dependency overrides in your `package.json` file so that you don't end up with multiple versions of Crawlee installed:

```json
{
    "overrides": {
        "apify": {
            "@crawlee/core": "3.12.3-beta.13",
            "@crawlee/types": "3.12.3-beta.13",
            "@crawlee/utils": "3.12.3-beta.13"
        }
    }
}
```

## 🛠 Features

- Single interface for **HTTP and headless browser** crawling
- Persistent **queue** for URLs to crawl (breadth & depth first)
- Pluggable **storage** of both tabular data and files
- Automatic **scaling** with available system resources
- Integrated **proxy rotation** and session management
- Lifecycles customizable with **hooks**
- **CLI** to bootstrap your projects
- Configurable **routing**, **error handling** and **retries**
- **Dockerfiles** ready to deploy
- Written in **TypeScript** with generics

### 👾 HTTP crawling

- Zero-config **HTTP/2 support**, even for proxies
- Automatic generation of **browser-like headers**
- Replication of browser **TLS fingerprints**
- Integrated fast **HTML parsers**: Cheerio and JSDOM
- Yes, you can scrape **JSON APIs** as well

### 💻 Real browser crawling

- JavaScript **rendering** and **screenshots**
- **Headless** and **headful** support
- Zero-config generation of **human-like fingerprints**
- Automatic **browser management**
- Use **Playwright** and **Puppeteer** with the same interface
- **Chrome**, **Firefox**, **WebKit** and many others

## Usage on the Apify platform

Crawlee is open-source and runs anywhere, but since it's developed by [Apify](https://apify.com), it's easy to set up on the Apify platform and run in the cloud. Visit the [Apify SDK website](https://sdk.apify.com) to learn more about deploying Crawlee to the Apify platform.

## Support

If you find any bug or issue with Crawlee, please [submit an issue on GitHub](https://github.com/apify/crawlee/issues). For questions, you can ask on [Stack Overflow](https://stackoverflow.com/questions/tagged/apify), in GitHub Discussions, or you can join our [Discord server](https://discord.com/invite/jyEM2PRvMU).

## Contributing

Your code contributions are welcome, and you'll be praised to eternity! If you have any ideas for improvements, either submit an issue or create a pull request. For contribution guidelines and the code of conduct, see [CONTRIBUTING.md](https://github.com/apify/crawlee/blob/master/CONTRIBUTING.md).

## License

This project is licensed under the Apache License 2.0 - see the [LICENSE.md](https://github.com/apify/crawlee/blob/master/LICENSE.md) file for details.
package/index.d.ts
ADDED
import { BaseHttpClient, type CustomFetchOptions } from '@crawlee/http-client';
/**
 * An HTTP client implementation based on the `got-scraping` library.
 */
export declare class GotScrapingHttpClient extends BaseHttpClient {
    /**
     * Type guard that validates the HTTP method (excluding CONNECT).
     * @param request - The HTTP request to validate
     */
    private validateRequest;
    private iterateHeaders;
    private parseHeaders;
    fetch(request: Request, options?: RequestInit & CustomFetchOptions): Promise<Response>;
}
//# sourceMappingURL=index.d.ts.map
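
For orientation, a minimal sketch of wiring this client into a crawler. It assumes crawler options accept an `httpClient` implementing `BaseHttpClient`, as they do in Crawlee 3.x; whether the 4.0 beta keeps that exact option name is an assumption here.

```ts
import { CheerioCrawler } from '@crawlee/cheerio';
import { GotScrapingHttpClient } from '@crawlee/got-scraping-client';

// Assumption: the crawler still takes a BaseHttpClient via `httpClient`,
// as in Crawlee 3.x. GotScrapingHttpClient then performs all HTTP requests.
const crawler = new CheerioCrawler({
    httpClient: new GotScrapingHttpClient(),
    async requestHandler({ request, $, log }) {
        log.info(`${request.loadedUrl}: ${$('title').text()}`);
    },
});

await crawler.run(['https://crawlee.dev']);
```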
package/index.d.ts.map
ADDED
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,cAAc,EAAE,KAAK,kBAAkB,EAAmB,MAAM,sBAAsB,CAAC;AAGhG;;GAEG;AACH,qBAAa,qBAAsB,SAAQ,cAAc;IACrD;;;OAGG;IACH,OAAO,CAAC,eAAe;IAMvB,OAAO,CAAE,cAAc;IAavB,OAAO,CAAC,YAAY;IAIL,KAAK,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,kBAAkB,GAAG,OAAO,CAAC,QAAQ,CAAC;CA0BxG"}
package/index.js
ADDED
import { Readable } from 'node:stream';
import { BaseHttpClient, ResponseWithUrl } from '@crawlee/http-client';
import { gotScraping } from 'got-scraping';
/**
 * An HTTP client implementation based on the `got-scraping` library.
 */
export class GotScrapingHttpClient extends BaseHttpClient {
    /**
     * Type guard that validates the HTTP method (excluding CONNECT).
     * @param request - The HTTP request to validate
     */
    validateRequest(request) {
        return !['CONNECT', 'connect'].includes(request.method);
    }
    /**
     * Yields `[name, value]` header pairs, skipping HTTP/2 pseudo-headers
     * (keys starting with `:`) and undefined values, and flattening
     * multi-value headers into one pair per value.
     */
    *iterateHeaders(headers) {
        for (const [key, value] of Object.entries(headers)) {
            if (key.startsWith(':') || value === undefined) continue;
            if (Array.isArray(value)) {
                for (const v of value) yield [key, v];
            } else {
                yield [key, value];
            }
        }
    }
    /** Converts a got-scraping headers object into a WHATWG `Headers` instance. */
    parseHeaders(headers) {
        return new Headers([...this.iterateHeaders(headers)]);
    }
    async fetch(request, options) {
        const { proxyUrl, redirect } = options ?? {};
        if (!this.validateRequest(request)) {
            throw new Error('The HTTP method CONNECT is not supported by the GotScrapingHttpClient.');
        }
        const gotResult = await gotScraping({
            url: request.url,
            method: request.method,
            headers: Object.fromEntries(request.headers.entries()),
            body: request.body ? Readable.fromWeb(request.body) : undefined,
            proxyUrl,
            signal: options?.signal ?? undefined,
            followRedirect: redirect === 'follow',
        });
        const responseHeaders = this.parseHeaders(gotResult.headers);
        return new ResponseWithUrl(new Uint8Array(gotResult.rawBody), {
            headers: responseHeaders,
            status: gotResult.statusCode,
            statusText: gotResult.statusMessage ?? '',
            url: gotResult.url,
        });
    }
}
//# sourceMappingURL=index.js.map
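
For context, a minimal sketch (not part of the package) of calling the client directly, exercising the options the implementation above actually reads: `proxyUrl`, `redirect` and `signal`. It assumes `ResponseWithUrl` behaves like a standard `Response`; the proxy URL is a placeholder.

```ts
import { GotScrapingHttpClient } from '@crawlee/got-scraping-client';

const client = new GotScrapingHttpClient();

// Standard RequestInit fields plus the CustomFetchOptions extras; the
// proxy URL below is a placeholder, not a working proxy.
const response = await client.fetch(new Request('https://crawlee.dev'), {
    redirect: 'follow',
    signal: AbortSignal.timeout(30_000),
    proxyUrl: 'http://user:pass@proxy.example.com:8000',
});

console.log(response.status, response.url);
```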
package/index.js.map
ADDED
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AAEvC,OAAO,EAAE,cAAc,EAA2B,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAChG,OAAO,EAAE,WAAW,EAAgB,MAAM,cAAc,CAAC;AAEzD;;GAEG;AACH,MAAM,OAAO,qBAAsB,SAAQ,cAAc;IACrD;;;OAGG;IACK,eAAe,CACnB,OAAgB;QAEhB,OAAO,CAAC,CAAC,SAAS,EAAE,SAAS,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,MAAO,CAAC,CAAC;IAC7D,CAAC;IAEO,CAAC,cAAc,CACnB,OAAsD;QAEtD,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC;YACjD,IAAI,GAAG,CAAC,UAAU,CAAC,GAAG,CAAC,IAAI,KAAK,KAAK,SAAS;gBAAE,SAAS;YACzD,IAAI,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC;gBACvB,KAAK,MAAM,CAAC,IAAI,KAAK;oBAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC;YAC1C,CAAC;iBAAM,CAAC;gBACJ,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC;YACvB,CAAC;QACL,CAAC;IACL,CAAC;IAEO,YAAY,CAAC,OAAsD;QACvE,OAAO,IAAI,OAAO,CAAC,CAAC,GAAG,IAAI,CAAC,cAAc,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;IAC1D,CAAC;IAEQ,KAAK,CAAC,KAAK,CAAC,OAAgB,EAAE,OAA0C;QAC7E,MAAM,EAAE,QAAQ,EAAE,QAAQ,EAAE,GAAG,OAAO,IAAI,EAAE,CAAC;QAE7C,IAAI,CAAC,IAAI,CAAC,eAAe,CAAC,OAAO,CAAC,EAAE,CAAC;YACjC,MAAM,IAAI,KAAK,CAAC,wEAAwE,CAAC,CAAC;QAC9F,CAAC;QAED,MAAM,SAAS,GAAG,MAAM,WAAW,CAAC;YAChC,GAAG,EAAE,OAAO,CAAC,GAAI;YACjB,MAAM,EAAE,OAAO,CAAC,MAA2B;YAC3C,OAAO,EAAE,MAAM,CAAC,WAAW,CAAC,OAAO,CAAC,OAAO,CAAC,OAAO,EAAE,CAAC;YACtD,IAAI,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,OAAO,CAAC,IAAW,CAAC,CAAC,CAAC,CAAC,SAAS;YACtE,QAAQ;YACR,MAAM,EAAE,OAAO,EAAE,MAAM,IAAI,SAAS;YACpC,cAAc,EAAE,QAAQ,KAAK,QAAQ;SACxC,CAAC,CAAC;QAEH,MAAM,eAAe,GAAG,IAAI,CAAC,YAAY,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC;QAE7D,OAAO,IAAI,eAAe,CAAC,IAAI,UAAU,CAAC,SAAS,CAAC,OAAO,CAAC,EAAE;YAC1D,OAAO,EAAE,eAAe;YACxB,MAAM,EAAE,SAAS,CAAC,UAAU;YAC5B,UAAU,EAAE,SAAS,CAAC,aAAa,IAAI,EAAE;YACzC,GAAG,EAAE,SAAS,CAAC,GAAG;SACrB,CAAC,CAAC;IACP,CAAC;CACJ"}
package/package.json
ADDED
{
    "name": "@crawlee/got-scraping-client",
    "version": "4.0.0-beta.23",
    "description": "The scalable web crawling and scraping library for JavaScript/Node.js. Enables development of data extraction and web automation jobs (not only) with headless Chrome and Puppeteer.",
    "engines": {
        "node": ">=22.0.0"
    },
    "type": "module",
    "exports": {
        ".": "./index.js",
        "./package.json": "./package.json"
    },
    "keywords": [
        "apify",
        "headless",
        "chrome",
        "puppeteer",
        "crawler",
        "scraper"
    ],
    "author": {
        "name": "Apify",
        "email": "support@apify.com",
        "url": "https://apify.com"
    },
    "contributors": [
        "Jan Curn <jan@apify.com>",
        "Marek Trunkat <marek@apify.com>",
        "Ondra Urban <ondra@apify.com>"
    ],
    "license": "Apache-2.0",
    "repository": {
        "type": "git",
        "url": "git+https://github.com/apify/crawlee"
    },
    "bugs": {
        "url": "https://github.com/apify/crawlee/issues"
    },
    "homepage": "https://crawlee.dev",
    "scripts": {
        "build": "yarn clean && yarn compile && yarn copy",
        "clean": "rimraf ./dist",
        "compile": "tsc -p tsconfig.build.json",
        "copy": "tsx ../../scripts/copy.ts"
    },
    "publishConfig": {
        "access": "public"
    },
    "dependencies": {
        "@crawlee/http-client": "4.0.0-beta.23",
        "got-scraping": "^4.1.3"
    },
    "lerna": {
        "command": {
            "publish": {
                "assets": []
            }
        }
    },
    "gitHead": "1d348c0c13605e8c4749727419f2c01d1162d642"
}