ultimate-pi 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/.agents/skills/caveman/SKILL.md +67 -0
  2. package/.agents/skills/compress/SKILL.md +111 -0
  3. package/.agents/skills/compress/scripts/__init__.py +9 -0
  4. package/.agents/skills/compress/scripts/__main__.py +3 -0
  5. package/.agents/skills/compress/scripts/benchmark.py +78 -0
  6. package/.agents/skills/compress/scripts/cli.py +73 -0
  7. package/.agents/skills/compress/scripts/compress.py +227 -0
  8. package/.agents/skills/compress/scripts/detect.py +121 -0
  9. package/.agents/skills/compress/scripts/validate.py +189 -0
  10. package/.agents/skills/context7-cli/SKILL.md +73 -0
  11. package/.agents/skills/context7-cli/references/docs.md +121 -0
  12. package/.agents/skills/context7-cli/references/setup.md +43 -0
  13. package/.agents/skills/context7-cli/references/skills.md +118 -0
  14. package/.agents/skills/emil-design-eng/SKILL.md +679 -0
  15. package/.agents/skills/lean-ctx/SKILL.md +149 -0
  16. package/.agents/skills/lean-ctx/scripts/install.sh +95 -0
  17. package/.agents/skills/scrapling-official/LICENSE.txt +28 -0
  18. package/.agents/skills/scrapling-official/SKILL.md +390 -0
  19. package/.agents/skills/scrapling-official/examples/01_fetcher_session.py +26 -0
  20. package/.agents/skills/scrapling-official/examples/02_dynamic_session.py +26 -0
  21. package/.agents/skills/scrapling-official/examples/03_stealthy_session.py +26 -0
  22. package/.agents/skills/scrapling-official/examples/04_spider.py +58 -0
  23. package/.agents/skills/scrapling-official/examples/README.md +45 -0
  24. package/.agents/skills/scrapling-official/references/fetching/choosing.md +78 -0
  25. package/.agents/skills/scrapling-official/references/fetching/dynamic.md +352 -0
  26. package/.agents/skills/scrapling-official/references/fetching/static.md +432 -0
  27. package/.agents/skills/scrapling-official/references/fetching/stealthy.md +255 -0
  28. package/.agents/skills/scrapling-official/references/mcp-server.md +214 -0
  29. package/.agents/skills/scrapling-official/references/migrating_from_beautifulsoup.md +86 -0
  30. package/.agents/skills/scrapling-official/references/parsing/adaptive.md +212 -0
  31. package/.agents/skills/scrapling-official/references/parsing/main_classes.md +586 -0
  32. package/.agents/skills/scrapling-official/references/parsing/selection.md +494 -0
  33. package/.agents/skills/scrapling-official/references/spiders/advanced.md +344 -0
  34. package/.agents/skills/scrapling-official/references/spiders/architecture.md +94 -0
  35. package/.agents/skills/scrapling-official/references/spiders/getting-started.md +164 -0
  36. package/.agents/skills/scrapling-official/references/spiders/proxy-blocking.md +235 -0
  37. package/.agents/skills/scrapling-official/references/spiders/requests-responses.md +196 -0
  38. package/.agents/skills/scrapling-official/references/spiders/sessions.md +205 -0
  39. package/.github/banner.png +0 -0
  40. package/.pi/SYSTEM.md +40 -0
  41. package/.pi/settings.json +5 -0
  42. package/PLAN.md +11 -0
  43. package/README.md +58 -0
  44. package/extensions/lean-ctx-enforce.ts +166 -0
  45. package/package.json +17 -0
  46. package/skills-lock.json +35 -0
  47. package/wiki/README.md +10 -0
  48. package/wiki/decisions/0001-establish-project-wiki-and-decision-record-format.md +25 -0
  49. package/wiki/decisions/0002-add-project-banner-to-readme.md +26 -0
  50. package/wiki/decisions/0003-remove-redundant-readme-title-heading.md +26 -0
  51. package/wiki/decisions/0004-publish-package-to-npm-as-ultimate-pi.md +26 -0
package/.agents/skills/scrapling-official/examples/01_fetcher_session.py
@@ -0,0 +1,26 @@
"""
Example 1: Python - FetcherSession (persistent HTTP session with Chrome TLS fingerprint)

Scrapes all 10 pages of quotes.toscrape.com using a single HTTP session.
No browser launched - fast and lightweight.

Best for: static or semi-static sites, APIs, pages that don't require JavaScript.
"""

from scrapling.fetchers import FetcherSession

all_quotes = []

with FetcherSession(impersonate="chrome") as session:
    for i in range(1, 11):
        page = session.get(
            f"https://quotes.toscrape.com/page/{i}/",
            stealthy_headers=True,
        )
        quotes = page.css(".quote .text::text").getall()
        all_quotes.extend(quotes)
        print(f"Page {i}: {len(quotes)} quotes (status {page.status})")

print(f"\nTotal: {len(all_quotes)} quotes\n")
for i, quote in enumerate(all_quotes, 1):
    print(f"{i:>3}. {quote}")
package/.agents/skills/scrapling-official/examples/02_dynamic_session.py
@@ -0,0 +1,26 @@
"""
Example 2: Python - DynamicSession (Playwright browser automation, visible)

Scrapes all 10 pages of quotes.toscrape.com using a persistent browser session.
The browser window stays open across all page requests for efficiency.

Best for: JavaScript-heavy pages, SPAs, sites with dynamic content loading.

Set headless=True to run the browser hidden.
Set disable_resources=True to skip loading images/fonts for a speed boost.
"""

from scrapling.fetchers import DynamicSession

all_quotes = []

with DynamicSession(headless=False, disable_resources=True) as session:
    for i in range(1, 11):
        page = session.fetch(f"https://quotes.toscrape.com/page/{i}/")
        quotes = page.css(".quote .text::text").getall()
        all_quotes.extend(quotes)
        print(f"Page {i}: {len(quotes)} quotes (status {page.status})")

print(f"\nTotal: {len(all_quotes)} quotes\n")
for i, quote in enumerate(all_quotes, 1):
    print(f"{i:>3}. {quote}")
package/.agents/skills/scrapling-official/examples/03_stealthy_session.py
@@ -0,0 +1,26 @@
"""
Example 3: Python - StealthySession (Patchright stealth browser, visible)

Scrapes all 10 pages of quotes.toscrape.com using a persistent stealth browser session.
Bypasses anti-bot protections automatically (Cloudflare Turnstile, fingerprinting, etc.).

Best for: well-protected sites, Cloudflare-gated pages, sites that detect Playwright.

Set headless=True to run the browser hidden.
Add solve_cloudflare=True to auto-solve Cloudflare challenges.
"""

from scrapling.fetchers import StealthySession

all_quotes = []

with StealthySession(headless=False) as session:
    for i in range(1, 11):
        page = session.fetch(f"https://quotes.toscrape.com/page/{i}/")
        quotes = page.css(".quote .text::text").getall()
        all_quotes.extend(quotes)
        print(f"Page {i}: {len(quotes)} quotes (status {page.status})")

print(f"\nTotal: {len(all_quotes)} quotes\n")
for i, quote in enumerate(all_quotes, 1):
    print(f"{i:>3}. {quote}")
package/.agents/skills/scrapling-official/examples/04_spider.py
@@ -0,0 +1,58 @@
"""
Example 4: Python - Spider (auto-crawling framework)

Scrapes ALL pages of quotes.toscrape.com by following "Next" pagination links
automatically. No manual page looping needed.

The spider yields structured items (text + author + tags) and exports them to JSON.

Best for: multi-page crawls, full-site scraping, anything needing pagination or
link following across many pages.

Outputs:
- Live stats to terminal during crawl
- Final crawl stats at the end
- quotes.json in the current directory
"""

from scrapling.spiders import Spider, Response


class QuotesSpider(Spider):
    name = "quotes"
    start_urls = ["https://quotes.toscrape.com/"]
    concurrent_requests = 5  # Fetch up to 5 pages at once

    async def parse(self, response: Response):
        # Extract all quotes on the current page
        for quote in response.css(".quote"):
            yield {
                "text": quote.css(".text::text").get(),
                "author": quote.css(".author::text").get(),
                "tags": quote.css(".tags .tag::text").getall(),
            }

        # Follow the "Next" button to the next page (if it exists)
        next_page = response.css(".next a")
        if next_page:
            yield response.follow(next_page[0].attrib["href"])


if __name__ == "__main__":
    result = QuotesSpider().start()

    print(f"\n{'=' * 50}")
    print(f"Scraped : {result.stats.items_scraped} quotes")
    print(f"Requests: {result.stats.requests_count}")
    print(f"Time    : {result.stats.elapsed_seconds:.2f}s")
    print(f"Speed   : {result.stats.requests_per_second:.2f} req/s")
    print(f"{'=' * 50}\n")

    for i, item in enumerate(result.items, 1):
        print(f"{i:>3}. [{item['author']}] {item['text']}")
        if item["tags"]:
            print(f"     Tags: {', '.join(item['tags'])}")

    # Export to JSON
    result.items.to_json("quotes.json", indent=True)
    print("\nExported to quotes.json")
package/.agents/skills/scrapling-official/examples/README.md
@@ -0,0 +1,45 @@
# Scrapling Examples

These examples scrape [quotes.toscrape.com](https://quotes.toscrape.com) - a safe, purpose-built scraping sandbox - and demonstrate every tool available in Scrapling, from plain HTTP to full browser automation and spiders.

All examples collect **all 100 quotes across 10 pages**.

## Quick Start

Make sure Scrapling is installed:

```bash
pip install "scrapling[all]>=0.4.7"
scrapling install --force
```

## Examples

| File                      | Tool              | Type                         | Best For                               |
|---------------------------|-------------------|------------------------------|----------------------------------------|
| `01_fetcher_session.py`   | `FetcherSession`  | Python - persistent HTTP     | APIs, fast multi-page scraping         |
| `02_dynamic_session.py`   | `DynamicSession`  | Python - browser automation  | Dynamic/SPA pages                      |
| `03_stealthy_session.py`  | `StealthySession` | Python - stealth browser     | Cloudflare, fingerprint bypass         |
| `04_spider.py`            | `Spider`          | Python - auto-crawling       | Multi-page crawls, full-site scraping  |

## Running

**Python scripts:**

```bash
python examples/01_fetcher_session.py
python examples/02_dynamic_session.py    # Opens a visible browser
python examples/03_stealthy_session.py   # Opens a visible stealth browser
python examples/04_spider.py             # Auto-crawls all pages, exports quotes.json
```

## Escalation Guide

Start with the fastest, lightest option and escalate only if needed:

```
get / FetcherSession
  └─ If JS required → fetch / DynamicSession
       └─ If blocked → stealthy-fetch / StealthySession
            └─ If multi-page → Spider
```
package/.agents/skills/scrapling-official/references/fetching/choosing.md
@@ -0,0 +1,78 @@
# Fetchers basics

## Introduction
Fetchers are classes that make requests or fetch pages in a single-line fashion with many features and return a [Response](#response-object) object. All fetchers have separate session classes to keep the session running (e.g., a browser fetcher keeps the browser open until you finish all requests).

Fetchers are not wrappers built on top of other libraries. They use those libraries as engines to request/fetch pages, but add features the underlying engines don't have, while still fully leveraging and optimizing them for web scraping.

## Fetchers Overview

Scrapling provides three different fetcher classes with their session classes; each fetcher is designed for a specific use case.

The following table compares them and can be used for quick guidance.

| Feature | Fetcher | DynamicFetcher | StealthyFetcher |
|---|---|---|---|
| Relative speed | 🐇🐇🐇🐇🐇 | 🐇🐇🐇 | 🐇🐇🐇 |
| Stealth | ⭐⭐ | ⭐⭐⭐ | ⭐⭐⭐⭐⭐ |
| Anti-bot options | ⭐⭐ | ⭐⭐⭐ | ⭐⭐⭐⭐⭐ |
| JavaScript loading | ❌ | ✅ | ✅ |
| Memory usage | ⭐ | ⭐⭐⭐ | ⭐⭐⭐ |
| Best used for | Basic scraping when HTTP requests alone can do it | - Dynamically loaded websites<br/>- Small automation<br/>- Small-to-mid protections | - Dynamically loaded websites<br/>- Small automation<br/>- Small-to-complicated protections |
| Browser(s) | ❌ | Chromium and Google Chrome | Chromium and Google Chrome |
| Browser API used | ❌ | Playwright | Playwright |
| Setup complexity | Simple | Simple | Simple |

## Parser configuration in all fetchers
All fetchers share the same import method, as you will see in the upcoming pages:
```python
>>> from scrapling.fetchers import Fetcher, AsyncFetcher, StealthyFetcher, DynamicFetcher
```
Then you can use a fetcher right away without initializing it, and it will use the default parser settings:
```python
>>> page = StealthyFetcher.fetch('https://example.com')
```
If you want to configure the parser ([Selector class](parsing/main_classes.md#selector)) that will be used on the response before it is returned to you, do this first:
```python
>>> from scrapling.fetchers import Fetcher
>>> Fetcher.configure(adaptive=True, keep_comments=False, keep_cdata=False)  # and the rest
```
or
```python
>>> from scrapling.fetchers import Fetcher
>>> Fetcher.adaptive = True
>>> Fetcher.keep_comments = False
>>> Fetcher.keep_cdata = False  # and the rest
```
Then, continue your code as usual.

The available configuration arguments are: `adaptive`, `adaptive_domain`, `huge_tree`, `keep_comments`, `keep_cdata`, `storage`, and `storage_args`, which are the same ones you give to the [Selector](parsing/main_classes.md#selector) class. You can display the current configuration anytime by running `<fetcher_class>.display_config()`.

**Info:** The `adaptive` argument is disabled by default; you must enable it to use that feature.

### Set parser config per request
The logic above applies the parser config globally to all requests/fetches made through that class; it's intended for simplicity.

If your use case requires a different configuration for each request/fetch, you can pass a dictionary of parser settings to the request method (`fetch`/`get`/`post`/...) through an argument named `selector_config`.
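For illustration, a minimal sketch of such a per-request override (the argument values here are arbitrary):
```python
>>> from scrapling.fetchers import Fetcher
>>> # Global default for this class
>>> Fetcher.configure(keep_comments=False)
>>> # Override the parser config for this request only
>>> page = Fetcher.get('https://example.com', selector_config={'keep_comments': True})
```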
59
+ ## Response Object
60
+ The `Response` object is the same as the [Selector](parsing/main_classes.md#selector) class, but it has additional details about the response, like response headers, status, cookies, etc., as shown below:
61
+ ```python
62
+ >>> from scrapling.fetchers import Fetcher
63
+ >>> page = Fetcher.get('https://example.com')
64
+
65
+ >>> page.status # HTTP status code
66
+ >>> page.reason # Status message
67
+ >>> page.cookies # Response cookies as a dictionary
68
+ >>> page.headers # Response headers
69
+ >>> page.request_headers # Request headers
70
+ >>> page.history # Response history of redirections, if any
71
+ >>> page.body # Raw response body as bytes
72
+ >>> page.encoding # Response encoding
73
+ >>> page.meta # Response metadata dictionary (e.g., proxy used). Mainly helpful with the spiders system.
74
+ >>> page.captured_xhr # List of captured XHR/fetch responses (when capture_xhr is enabled on a browser session)
75
+ ```
76
+ All fetchers return the `Response` object.
77
+
78
+ **Note:** Unlike the [Selector](parsing/main_classes.md#selector) class, the `Response` class's body is always bytes since v0.4.
package/.agents/skills/scrapling-official/references/fetching/dynamic.md
@@ -0,0 +1,352 @@
# Fetching dynamic websites

`DynamicFetcher` (formerly `PlayWrightFetcher`) provides flexible browser automation with multiple configuration options and built-in stealth improvements.

As we will explain later, to automate the page, you need some knowledge of [Playwright's Page API](https://playwright.dev/python/docs/api/class-page).

## Basic Usage
You have one primary way to import this fetcher, and it's the same for all fetchers:

```python
>>> from scrapling.fetchers import DynamicFetcher
```
Check out how to configure the parsing options [here](choosing.md#parser-configuration-in-all-fetchers).

**Note:** The async version of the `fetch` method is `async_fetch`.
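For example, a minimal sketch of the async variant (assuming it accepts the same arguments as `fetch`):
```python
import asyncio
from scrapling.fetchers import DynamicFetcher

async def main():
    # Same call shape as fetch, but awaited
    page = await DynamicFetcher.async_fetch('https://example.com')
    print(page.status)

asyncio.run(main())
```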
This fetcher provides three main run options, which can be combined as desired:

### 1. Vanilla Playwright
```python
DynamicFetcher.fetch('https://example.com')
```
Using it in that manner will open a Chromium browser and load the page. There are speed optimizations, and some stealth runs automatically under the hood, but other than that, there are no tricks or extra features unless you enable some; it's just the plain Playwright API.

### 2. Real Chrome
```python
DynamicFetcher.fetch('https://example.com', real_chrome=True)
```
If you have a Google Chrome browser installed, use this option. It's the same as the first option, but it uses the Google Chrome browser installed on your device instead of Chromium. This makes your requests look more authentic and less detectable, for better results.

If you don't have Google Chrome installed and want to use this option, you can run the command below in the terminal to install it for the library instead of installing it manually:
```commandline
playwright install chrome
```

### 3. CDP Connection
```python
DynamicFetcher.fetch('https://example.com', cdp_url='ws://localhost:9222')
```
Instead of launching a browser locally (Chromium/Google Chrome), you can connect to a remote browser through the [Chrome DevTools Protocol](https://chromedevtools.github.io/devtools-protocol/).

**Notes:**
* There was a `stealth` option here, but it was moved to the `StealthyFetcher` class with additional features in version 0.3.13, as explained on the next page.
* This makes it less confusing for new users, easier to maintain, and provides other benefits, as explained on the [StealthyFetcher page](stealthy.md).

## Full list of arguments
All arguments for `DynamicFetcher` and its session classes:

| Argument | Description | Optional |
|:---:|---|:---:|
| url | Target URL | ❌ |
| headless | Pass `True` to run the browser in headless/hidden mode (**default**) or `False` for headful/visible mode. | ✔️ |
| disable_resources | Drop requests for unnecessary resources for a speed boost. Dropped requests are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`. | ✔️ |
| cookies | Set cookies for the next request. | ✔️ |
| useragent | Pass a user agent string to be used. **Otherwise, the fetcher generates and uses a real user agent for the same browser and version.** | ✔️ |
| network_idle | Wait for the page until there are no network connections for at least 500 ms. | ✔️ |
| load_dom | Enabled by default; waits for all JavaScript on the page to fully load and execute (waits for the `domcontentloaded` state). | ✔️ |
| timeout | The timeout (milliseconds) used in all operations and waits through the page. Defaults to 30,000 ms (30 seconds). | ✔️ |
| wait | The time (milliseconds) the fetcher waits after everything finishes, before closing the page and returning the `Response` object. | ✔️ |
| page_action | Added for automation. Pass a function that takes the `page` object, runs after navigation, and performs the desired automation. | ✔️ |
| page_setup | A function that takes the `page` object and runs before navigation. Use it to register event listeners or routes that must be set up before the page loads. | ✔️ |
| wait_selector | Wait for a specific CSS selector to be in a specific state. | ✔️ |
| init_script | An absolute path to a JavaScript file to be executed on page creation for all pages in this session. | ✔️ |
| wait_selector_state | Scrapling waits for the given state to be fulfilled for the selector given with `wait_selector`. _The default state is `attached`._ | ✔️ |
| google_search | Enabled by default; Scrapling sets a Google referer header. | ✔️ |
| extra_headers | A dictionary of extra headers to add to the request. _The referer set by `google_search` takes priority over a referer set here if both are used._ | ✔️ |
| proxy | The proxy to be used with requests. It can be a string or a dictionary with only the keys 'server', 'username', and 'password'. | ✔️ |
| real_chrome | If you have a Chrome browser installed on your device, enable this, and the fetcher will launch and use an instance of your browser. | ✔️ |
| locale | Specify the user locale, for example, `en-GB`, `de-DE`, etc. The locale affects the `navigator.language` value, the `Accept-Language` request header, and number and date formatting rules. Defaults to the system default locale. | ✔️ |
| timezone_id | Changes the timezone of the browser. Defaults to the system timezone. | ✔️ |
| cdp_url | Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP. | ✔️ |
| user_data_dir | Path to a user data directory, which stores browser session data like cookies and local storage. The default is to create a temporary directory. **Only works with sessions.** | ✔️ |
| extra_flags | A list of additional browser flags to pass to the browser on launch. | ✔️ |
| additional_args | Additional arguments to be passed to Playwright's context as additional settings; they take higher priority than Scrapling's settings. | ✔️ |
| selector_config | A dictionary of custom parsing arguments to be used when creating the final `Selector`/`Response` object. | ✔️ |
| blocked_domains | A set of domain names to block requests to. Subdomains are also matched (e.g., `"example.com"` blocks `"sub.example.com"` too). | ✔️ |
| block_ads | Block requests to ~3,500 known ad/tracking domains. Can be combined with `blocked_domains`. | ✔️ |
| dns_over_https | Route DNS queries through Cloudflare's DNS-over-HTTPS to prevent DNS leaks when using proxies. | ✔️ |
| proxy_rotator | A `ProxyRotator` instance for automatic proxy rotation. Cannot be combined with `proxy`. | ✔️ |
| retries | Number of retry attempts for failed requests. Defaults to 3. | ✔️ |
| retry_delay | Seconds to wait between retry attempts. Defaults to 1. | ✔️ |
| capture_xhr | Pass a regex URL pattern string to capture XHR/fetch requests matching it during page load. Captured responses are available via `response.captured_xhr`. Defaults to `None` (disabled). | ✔️ |
| executable_path | Absolute path to a custom browser executable to use instead of the bundled Chromium. Useful for non-standard installations or custom browser builds. | ✔️ |

In session classes, all these arguments can be set globally for the session, but you can also configure each request individually by passing any of the arguments that can be configured at the browser-tab level: `google_search`, `timeout`, `wait`, `page_action`, `page_setup`, `extra_headers`, `disable_resources`, `wait_selector`, `wait_selector_state`, `network_idle`, `load_dom`, `blocked_domains`, `proxy`, and `selector_config`.
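For example, a minimal sketch of per-request overrides inside a session (URLs and values are placeholders):
```python
from scrapling.fetchers import DynamicSession

with DynamicSession(headless=True, timeout=30000) as session:
    # Uses the session-wide defaults set above
    page1 = session.fetch('https://example.com')
    # Overrides tab-level options for this request only
    page2 = session.fetch('https://example.com/slow', timeout=60000, network_idle=True)
```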
**Notes:**
1. The `disable_resources` option made requests ~25% faster in tests on some websites and can help save proxy usage, but be careful with it, as it can cause some websites to never finish loading.
2. The `google_search` argument is enabled by default for all requests, setting the referer to `https://www.google.com/`. If used together with `extra_headers`, it takes priority over the referer set there.
3. Since version 0.3.13, the `stealth` option has been removed here in favor of the `StealthyFetcher` class, and the `hide_canvas` option has been moved to it. The `disable_webgl` argument has been moved to the `StealthyFetcher` class and renamed `allow_webgl`.
4. If you didn't set a user agent and enabled headless mode, the fetcher generates and uses a real user agent for the same browser version. If you didn't set a user agent and didn't enable headless mode, the fetcher uses the browser's default user agent, which matches standard browsers in the latest versions.

## Examples

### Resource Control

```python
# Disable unnecessary resources
page = DynamicFetcher.fetch('https://example.com', disable_resources=True)  # Blocks fonts, images, media, etc.
```

### Domain Blocking

```python
# Block requests to specific domains (and their subdomains)
page = DynamicFetcher.fetch('https://example.com', blocked_domains={"ads.example.com", "tracker.net"})
```
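Since `block_ads` can be combined with `blocked_domains` (see the arguments table above), a minimal sketch; the domain here is a placeholder:
```python
# Block ~3,500 known ad/tracking domains plus a custom blocklist
page = DynamicFetcher.fetch('https://example.com', block_ads=True, blocked_domains={"tracker.net"})
```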
### Network Control

```python
# Wait for network idle (consider the fetch finished when there are no network connections for at least 500 ms)
page = DynamicFetcher.fetch('https://example.com', network_idle=True)

# Custom timeout (in milliseconds)
page = DynamicFetcher.fetch('https://example.com', timeout=30000)  # 30 seconds

# Proxy support (it can also be a dictionary with only the keys 'server', 'username', and 'password')
page = DynamicFetcher.fetch('https://example.com', proxy='http://username:password@host:port')
```
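The dictionary form of `proxy` uses only the keys named in the arguments table; a minimal sketch with placeholder values:
```python
# Equivalent proxy configuration as a dictionary
page = DynamicFetcher.fetch(
    'https://example.com',
    proxy={'server': 'http://host:port', 'username': 'username', 'password': 'password'},
)
```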
### Proxy Rotation

```python
from scrapling.fetchers import DynamicSession, ProxyRotator

# Set up proxy rotation
rotator = ProxyRotator([
    "http://proxy1:8080",
    "http://proxy2:8080",
    "http://proxy3:8080",
])

# Use with a session - rotates the proxy automatically with each request
with DynamicSession(proxy_rotator=rotator, headless=True) as session:
    page1 = session.fetch('https://example1.com')
    page2 = session.fetch('https://example2.com')

    # Override the rotator for a specific request
    page3 = session.fetch('https://example3.com', proxy='http://specific-proxy:8080')
```

**Warning:** By default, all browser-based fetchers and sessions use a persistent browser context with a pool of tabs. However, since browsers can't set a proxy per tab, when you use a `ProxyRotator`, the fetcher automatically opens a separate context for each proxy, with one tab per context. Once the tab's job is done, both the tab and its context are closed.

### Downloading Files

```python
page = DynamicFetcher.fetch('https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/main_cover.png')

with open(file='main_cover.png', mode='wb') as f:
    f.write(page.body)
```

The `body` attribute of the `Response` object always returns `bytes`.
### Pre-Navigation Setup
If you need to set up event listeners, routes, or scripts that must be registered before the page navigates, use `page_setup`. This function receives the `page` object and runs before `page.goto()` is called.

```python
from playwright.sync_api import Page

def capture_websockets(page: Page):
    page.on("websocket", lambda ws: print(f"WebSocket opened: {ws.url}"))

page = DynamicFetcher.fetch('https://example.com', page_setup=capture_websockets)
```
Async version:
```python
from playwright.async_api import Page

async def capture_websockets(page: Page):
    page.on("websocket", lambda ws: print(f"WebSocket opened: {ws.url}"))

page = await DynamicFetcher.async_fetch('https://example.com', page_setup=capture_websockets)
```

You can combine it with `page_action`: `page_setup` runs before navigation, and `page_action` runs after.

### Browser Automation
This is where your knowledge of [Playwright's Page API](https://playwright.dev/python/docs/api/class-page) comes into play. The function you pass here takes the page object from Playwright's API, performs the desired action, and then the fetcher continues.

This function is executed immediately after waiting for `network_idle` (if enabled) and before waiting for the `wait_selector` argument, allowing it to be used for purposes beyond automation. You can alter the page as you want.

In the example below, I used the page's [mouse events](https://playwright.dev/python/docs/api/class-mouse) to scroll the page with the mouse wheel, then move the mouse.
```python
from playwright.sync_api import Page

def scroll_page(page: Page):
    page.mouse.wheel(10, 0)
    page.mouse.move(100, 400)
    page.mouse.up()

page = DynamicFetcher.fetch('https://example.com', page_action=scroll_page)
```
Of course, if you use the async fetch version, the function must also be async.
```python
from playwright.async_api import Page

async def scroll_page(page: Page):
    await page.mouse.wheel(10, 0)
    await page.mouse.move(100, 400)
    await page.mouse.up()

page = await DynamicFetcher.async_fetch('https://example.com', page_action=scroll_page)
```

### Wait Conditions

```python
# Wait for the selector
page = DynamicFetcher.fetch(
    'https://example.com',
    wait_selector='h1',
    wait_selector_state='visible'
)
```
This is the last wait the fetcher performs before returning the response (if enabled). You pass a CSS selector to the `wait_selector` argument, and the fetcher waits for the state you passed in the `wait_selector_state` argument to be fulfilled. If you didn't pass a state, the default is `attached`, which means it waits for the element to be present in the DOM.

After that, if `load_dom` is enabled (the default), the fetcher checks again whether all JavaScript files are loaded and executed (the `domcontentloaded` state) or continues waiting. If you enabled `network_idle`, the fetcher again waits for it to be fulfilled, as explained above.

The states the fetcher can wait for can be any of the following ([source](https://playwright.dev/python/docs/api/class-page#page-wait-for-selector)):

- `attached`: Wait for the element to be present in the DOM.
- `detached`: Wait for the element to not be present in the DOM.
- `visible`: Wait for the element to have a non-empty bounding box and no `visibility:hidden`. Note that an element without any content or with `display:none` has an empty bounding box and is not considered visible.
- `hidden`: Wait for the element to be either detached from the DOM, have an empty bounding box, or have `visibility:hidden`. This is the opposite of the `visible` option.
### Capturing XHR/Fetch Requests

Many SPAs load data through background API calls (XHR/fetch). You can capture these requests by passing a regex URL pattern to `capture_xhr` at the session level:

```python
from scrapling.fetchers import DynamicSession

with DynamicSession(capture_xhr=r"https://api\.example\.com/.*", headless=True) as session:
    page = session.fetch('https://example.com')

    # Access captured XHR responses
    for xhr in page.captured_xhr:
        print(xhr.url, xhr.status)
        print(xhr.body)  # Raw response body as bytes
```

Each item in `captured_xhr` is a full `Response` object with the same properties (`.url`, `.status`, `.headers`, `.body`, etc.). When `capture_xhr` is not set or is `None`, `captured_xhr` is an empty list.
### Some Stealth Features

```python
page = DynamicFetcher.fetch(
    'https://example.com',
    google_search=True,
    useragent='Mozilla/5.0...',  # Custom user agent
    locale='en-US',  # Set browser locale
)
```

### General example
```python
from scrapling.fetchers import DynamicFetcher

def scrape_dynamic_content():
    # Use Playwright for JavaScript content
    page = DynamicFetcher.fetch(
        'https://example.com/dynamic',
        network_idle=True,
        wait_selector='.content'
    )

    # Extract dynamic content
    content = page.css('.content')

    return {
        'title': content.css('h1::text').get(),
        'items': [
            item.text for item in content.css('.item')
        ]
    }
```
## Session Management

To keep the browser open while you make multiple requests with the same configuration, use the `DynamicSession`/`AsyncDynamicSession` classes. These classes accept all the arguments the `fetch` function can take, which lets you specify a config for the entire session.

```python
from scrapling.fetchers import DynamicSession

# Create a session with a default configuration
with DynamicSession(
    headless=True,
    disable_resources=True,
    real_chrome=True
) as session:
    # Make multiple requests with the same browser instance
    page1 = session.fetch('https://example1.com')
    page2 = session.fetch('https://example2.com')
    page3 = session.fetch('https://dynamic-site.com')

    # All requests reuse the same tab on the same browser instance
```

### Async Session Usage

```python
import asyncio
from scrapling.fetchers import AsyncDynamicSession

async def scrape_multiple_sites():
    async with AsyncDynamicSession(
        network_idle=True,
        timeout=30000,
        max_pages=3
    ) as session:
        # Make async requests with a shared browser configuration
        pages = await asyncio.gather(
            session.fetch('https://spa-app1.com'),
            session.fetch('https://spa-app2.com'),
            session.fetch('https://dynamic-content.com')
        )
        return pages
```

You may have noticed the `max_pages` argument. It enables the fetcher to maintain a **rotating pool of browser tabs**. Instead of using a single tab for all your requests, you set a limit on the maximum number of tabs open at once. With each request, the library closes all tabs that have finished their task and checks whether the number of current tabs is below the allowed maximum; then:

1. If you are within the allowed range, the fetcher creates a new tab for the request, and everything proceeds as normal.
2. Otherwise, it keeps checking at sub-second intervals for up to 60 seconds, then raises `TimeoutError`. This can happen when the website you are fetching becomes unresponsive.

This logic allows multiple URLs to be fetched at the same time in the same browser, which saves a lot of resources and, most importantly, is fast :)

In versions 0.3 and 0.3.1, the pool reused finished tabs to save more resources/time. That logic proved flawed, as it's nearly impossible to protect tabs from contamination by the configuration used in the previous request.

### Session Benefits

- **Browser reuse**: Much faster subsequent requests by reusing the same browser instance.
- **Cookie persistence**: Automatic cookie and session-state handling, as any browser does.
- **Consistent fingerprint**: The same browser fingerprint across all requests.
- **Memory efficiency**: Better resource usage compared to launching a new browser with each fetch.

## When to Use

Use `DynamicFetcher` when you:

- Need browser automation
- Want multiple browser options
- Are using a real Chrome browser
- Need custom browser config
- Want a few stealth options

If you want more stealth and control without much config, check out the [StealthyFetcher](stealthy.md).