scrapling 0.2.99__py3-none-any.whl → 0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. scrapling/__init__.py +18 -31
  2. scrapling/cli.py +818 -20
  3. scrapling/core/_html_utils.py +348 -0
  4. scrapling/core/_types.py +34 -17
  5. scrapling/core/ai.py +611 -0
  6. scrapling/core/custom_types.py +183 -100
  7. scrapling/core/mixins.py +27 -19
  8. scrapling/core/shell.py +647 -0
  9. scrapling/core/{storage_adaptors.py → storage.py} +41 -33
  10. scrapling/core/translator.py +20 -26
  11. scrapling/core/utils.py +49 -54
  12. scrapling/engines/__init__.py +15 -6
  13. scrapling/engines/_browsers/__init__.py +2 -0
  14. scrapling/engines/_browsers/_camoufox.py +745 -0
  15. scrapling/engines/_browsers/_config_tools.py +130 -0
  16. scrapling/engines/_browsers/_controllers.py +630 -0
  17. scrapling/engines/_browsers/_page.py +93 -0
  18. scrapling/engines/_browsers/_validators.py +150 -0
  19. scrapling/engines/constants.py +101 -88
  20. scrapling/engines/static.py +667 -110
  21. scrapling/engines/toolbelt/__init__.py +20 -6
  22. scrapling/engines/toolbelt/bypasses/playwright_fingerprint.js +2 -1
  23. scrapling/engines/toolbelt/convertor.py +254 -0
  24. scrapling/engines/toolbelt/custom.py +158 -175
  25. scrapling/engines/toolbelt/fingerprints.py +32 -46
  26. scrapling/engines/toolbelt/navigation.py +68 -39
  27. scrapling/fetchers.py +227 -333
  28. scrapling/parser.py +781 -449
  29. scrapling-0.3.dist-info/METADATA +409 -0
  30. scrapling-0.3.dist-info/RECORD +41 -0
  31. {scrapling-0.2.99.dist-info → scrapling-0.3.dist-info}/WHEEL +1 -1
  32. {scrapling-0.2.99.dist-info → scrapling-0.3.dist-info}/top_level.txt +0 -1
  33. scrapling/defaults.py +0 -25
  34. scrapling/engines/camo.py +0 -339
  35. scrapling/engines/pw.py +0 -465
  36. scrapling/engines/toolbelt/bypasses/pdf_viewer.js +0 -5
  37. scrapling-0.2.99.dist-info/METADATA +0 -290
  38. scrapling-0.2.99.dist-info/RECORD +0 -49
  39. tests/__init__.py +0 -1
  40. tests/fetchers/__init__.py +0 -1
  41. tests/fetchers/async/__init__.py +0 -0
  42. tests/fetchers/async/test_camoufox.py +0 -97
  43. tests/fetchers/async/test_httpx.py +0 -85
  44. tests/fetchers/async/test_playwright.py +0 -101
  45. tests/fetchers/sync/__init__.py +0 -0
  46. tests/fetchers/sync/test_camoufox.py +0 -70
  47. tests/fetchers/sync/test_httpx.py +0 -84
  48. tests/fetchers/sync/test_playwright.py +0 -89
  49. tests/fetchers/test_utils.py +0 -97
  50. tests/parser/__init__.py +0 -0
  51. tests/parser/test_automatch.py +0 -111
  52. tests/parser/test_general.py +0 -330
  53. {scrapling-0.2.99.dist-info → scrapling-0.3.dist-info}/entry_points.txt +0 -0
  54. {scrapling-0.2.99.dist-info → scrapling-0.3.dist-info}/licenses/LICENSE +0 -0
scrapling/fetchers.py CHANGED
@@ -1,284 +1,127 @@
- from scrapling.core._types import (Callable, Dict, List, Literal, Optional,
- SelectorWaitStates, Union)
- from scrapling.engines import (CamoufoxEngine, PlaywrightEngine, StaticEngine,
- check_if_engine_usable)
+ from scrapling.core._types import (
+ Callable,
+ Dict,
+ List,
+ Optional,
+ SelectorWaitStates,
+ Iterable,
+ )
+ from scrapling.engines import (
+ FetcherSession,
+ StealthySession,
+ AsyncStealthySession,
+ DynamicSession,
+ AsyncDynamicSession,
+ FetcherClient as _FetcherClient,
+ AsyncFetcherClient as _AsyncFetcherClient,
+ )
  from scrapling.engines.toolbelt import BaseFetcher, Response

+ __FetcherClientInstance__ = _FetcherClient()
+ __AsyncFetcherClientInstance__ = _AsyncFetcherClient()

- class Fetcher(BaseFetcher):
- """A basic `Fetcher` class type that can only do basic GET, POST, PUT, and DELETE HTTP requests based on httpx.
-
- Any additional keyword arguments passed to the methods below are passed to the respective httpx's method directly.
- """
- @classmethod
- def get(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP GET request for you but with some added flavors.
-
- :param url: Target url.
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request had came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
-
- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries, adaptor_arguments=adaptor_arguments).get(**kwargs)
- return response_object
-
- @classmethod
- def post(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP POST request for you but with some added flavors.
-
- :param url: Target url.
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
-
- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries, adaptor_arguments=adaptor_arguments).post(**kwargs)
- return response_object
-
- @classmethod
- def put(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP PUT request for you but with some added flavors.

- :param url: Target url
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.put()` function so check httpx documentation for details.
-
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
-
- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries, adaptor_arguments=adaptor_arguments).put(**kwargs)
- return response_object
-
- @classmethod
- def delete(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP DELETE request for you but with some added flavors.
-
- :param url: Target url
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.delete()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
-
- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries, adaptor_arguments=adaptor_arguments).delete(**kwargs)
- return response_object
-
-
- class AsyncFetcher(Fetcher):
- @classmethod
- async def get(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP GET request for you but with some added flavors.
-
- :param url: Target url.
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request had came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
-
- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = await StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries=retries, adaptor_arguments=adaptor_arguments).async_get(**kwargs)
- return response_object
-
- @classmethod
- async def post(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP POST request for you but with some added flavors.
-
- :param url: Target url.
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
-
- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = await StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries=retries, adaptor_arguments=adaptor_arguments).async_post(**kwargs)
- return response_object
-
- @classmethod
- async def put(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP PUT request for you but with some added flavors.
-
- :param url: Target url
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.put()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
+ class Fetcher(BaseFetcher):
+ """A basic `Fetcher` class type that can only do basic GET, POST, PUT, and DELETE HTTP requests based on `curl_cffi`."""

- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = await StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries=retries, adaptor_arguments=adaptor_arguments).async_put(**kwargs)
- return response_object
+ get = __FetcherClientInstance__.get
+ post = __FetcherClientInstance__.post
+ put = __FetcherClientInstance__.put
+ delete = __FetcherClientInstance__.delete

- @classmethod
- async def delete(
- cls, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = 10, stealthy_headers: bool = True,
- proxy: Optional[str] = None, retries: Optional[int] = 3, custom_config: Dict = None, **kwargs: Dict) -> Response:
- """Make basic HTTP DELETE request for you but with some added flavors.

- :param url: Target url
- :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
- :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
- :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
- create a referer header as if this request came from Google's search of this URL's domain.
- :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
- :param retries: The number of retries to do through httpx if the request failed for any reason. The default is 3 retries.
- :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param kwargs: Any additional keyword arguments are passed directly to `httpx.delete()` function so check httpx documentation for details.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
- """
- if not custom_config:
- custom_config = {}
- elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
+ class AsyncFetcher(BaseFetcher):
+ """A basic `Fetcher` class type that can only do basic GET, POST, PUT, and DELETE HTTP requests based on `curl_cffi`."""

- adaptor_arguments = tuple({**cls._generate_parser_arguments(), **custom_config}.items())
- response_object = await StaticEngine(url, proxy, stealthy_headers, follow_redirects, timeout, retries=retries, adaptor_arguments=adaptor_arguments).async_delete(**kwargs)
- return response_object
+ get = __AsyncFetcherClientInstance__.get
+ post = __AsyncFetcherClientInstance__.post
+ put = __AsyncFetcherClientInstance__.put
+ delete = __AsyncFetcherClientInstance__.delete


  class StealthyFetcher(BaseFetcher):
- """A `Fetcher` class type that is completely stealthy fetcher that uses a modified version of Firefox.
+ """A `Fetcher` class type that is a completely stealthy fetcher that uses a modified version of Firefox.

- It works as real browsers passing almost all online tests/protections based on Camoufox.
- Other added flavors include setting the faked OS fingerprints to match the user's OS and the referer of every request is set as if this request came from Google's search of this URL's domain.
+ It works as real browsers passing almost all online tests/protections based on Camoufox.
+ Other added flavors include setting the faked OS fingerprints to match the user's OS, and the referer of every request is set as if this request came from Google's search of this URL's domain.
  """
+
  @classmethod
  def fetch(
- cls, url: str, headless: Union[bool, Literal['virtual']] = True, block_images: bool = False, disable_resources: bool = False,
- block_webrtc: bool = False, allow_webgl: bool = True, network_idle: bool = False, addons: Optional[List[str]] = None, wait: Optional[int] = 0,
- timeout: Optional[float] = 30000, page_action: Callable = None, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
- wait_selector_state: SelectorWaitStates = 'attached', google_search: bool = True, extra_headers: Optional[Dict[str, str]] = None,
- proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: bool = False, disable_ads: bool = False, geoip: bool = False,
- custom_config: Dict = None, additional_arguments: Dict = None
+ cls,
+ url: str,
+ headless: bool = True,  # noqa: F821
+ block_images: bool = False,
+ disable_resources: bool = False,
+ block_webrtc: bool = False,
+ allow_webgl: bool = True,
+ network_idle: bool = False,
+ humanize: bool | float = True,
+ solve_cloudflare: bool = False,
+ wait: int | float = 0,
+ timeout: int | float = 30000,
+ page_action: Optional[Callable] = None,
+ wait_selector: Optional[str] = None,
+ addons: Optional[List[str]] = None,
+ wait_selector_state: SelectorWaitStates = "attached",
+ cookies: Optional[List[Dict]] = None,
+ google_search: bool = True,
+ extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[str | Dict[str, str]] = None,
+ os_randomize: bool = False,
+ disable_ads: bool = False,
+ geoip: bool = False,
+ custom_config: Optional[Dict] = None,
+ additional_args: Optional[Dict] = None,
  ) -> Response:
  """
  Opens up a browser and do your request based on your chosen options below.

  :param url: Target url.
- :param headless: Run the browser in headless/hidden (default), 'virtual' screen mode, or headful/visible mode.
+ :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
  :param block_images: Prevent the loading of images through Firefox preferences.
  This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
- :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
+ :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends, but it made requests ~25% faster in my tests for some websites.
  Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
  This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
  :param block_webrtc: Blocks WebRTC entirely.
+ :param cookies: Set cookies for the next request.
  :param addons: List of Firefox addons to use. Must be paths to extracted addons.
- :param disable_ads: Disabled by default, this installs `uBlock Origin` addon on the browser if enabled.
  :param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
- :param allow_webgl: Enabled by default. Disabling it WebGL not recommended as many WAFs now checks if WebGL is enabled.
- :param geoip: Recommended to use with proxies; Automatically use IP's longitude, latitude, timezone, country, locale, & spoof the WebRTC IP address.
- It will also calculate and spoof the browser's language based on the distribution of language speakers in the target region.
+ :param solve_cloudflare: Solves all 3 types of Cloudflare's Turnstile wait page before returning the response to you.
+ :param allow_webgl: Enabled by default. Disabling WebGL is not recommended as many WAFs now check if WebGL is enabled.
  :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
+ :param disable_ads: Disabled by default, this installs the `uBlock Origin` addon on the browser if enabled.
  :param os_randomize: If enabled, Scrapling will randomize the OS fingerprints used. The default is Scrapling matching the fingerprints with the current OS.
- :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000.
- :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning `Response` object.
+ :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the `Response` object.
+ :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000.
  :param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
- :param wait_selector: Wait for a specific css selector to be in a specific state.
- :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
- :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
+ :param wait_selector: Wait for a specific CSS selector to be in a specific state.
+ :param geoip: Recommended to use with proxies; Automatically use IP's longitude, latitude, timezone, country, locale, and spoof the WebRTC IP address.
+ It will also calculate and spoof the browser's language based on the distribution of language speakers in the target region.
+ :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
+ :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
  :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
  :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
  :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param additional_arguments: Additional arguments to be passed to Camoufox as additional settings and it takes higher priority than Scrapling's settings.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+ :param additional_args: Additional arguments to be passed to Camoufox as additional settings, and it takes higher priority than Scrapling's settings.
+ :return: A `Response` object.
  """
  if not custom_config:
  custom_config = {}
  elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
+ raise ValueError(
+ f"The custom parser config must be of type dictionary, got {cls.__class__}"
+ )

- engine = CamoufoxEngine(
+ with StealthySession(
  wait=wait,
+ max_pages=1,
  proxy=proxy,
  geoip=geoip,
  addons=addons,
  timeout=timeout,
+ cookies=cookies,
  headless=headless,
  humanize=humanize,
  disable_ads=disable_ads,
@@ -291,64 +134,90 @@ class StealthyFetcher(BaseFetcher):
  wait_selector=wait_selector,
  google_search=google_search,
  extra_headers=extra_headers,
+ solve_cloudflare=solve_cloudflare,
  disable_resources=disable_resources,
  wait_selector_state=wait_selector_state,
- adaptor_arguments={**cls._generate_parser_arguments(), **custom_config},
- additional_arguments=additional_arguments or {}
- )
- return engine.fetch(url)
+ selector_config={**cls._generate_parser_arguments(), **custom_config},
+ additional_args=additional_args or {},
+ ) as engine:
+ return engine.fetch(url)

  @classmethod
  async def async_fetch(
- cls, url: str, headless: Union[bool, Literal['virtual']] = True, block_images: bool = False, disable_resources: bool = False,
- block_webrtc: bool = False, allow_webgl: bool = True, network_idle: bool = False, addons: Optional[List[str]] = None, wait: Optional[int] = 0,
- timeout: Optional[float] = 30000, page_action: Callable = None, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
- wait_selector_state: SelectorWaitStates = 'attached', google_search: bool = True, extra_headers: Optional[Dict[str, str]] = None,
- proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: bool = False, disable_ads: bool = False, geoip: bool = False,
- custom_config: Dict = None, additional_arguments: Dict = None
+ cls,
+ url: str,
+ headless: bool = True,  # noqa: F821
+ block_images: bool = False,
+ disable_resources: bool = False,
+ block_webrtc: bool = False,
+ allow_webgl: bool = True,
+ network_idle: bool = False,
+ humanize: bool | float = True,
+ solve_cloudflare: bool = False,
+ wait: int | float = 0,
+ timeout: int | float = 30000,
+ page_action: Optional[Callable] = None,
+ wait_selector: Optional[str] = None,
+ addons: Optional[List[str]] = None,
+ wait_selector_state: SelectorWaitStates = "attached",
+ cookies: Optional[List[Dict]] = None,
+ google_search: bool = True,
+ extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[str | Dict[str, str]] = None,
+ os_randomize: bool = False,
+ disable_ads: bool = False,
+ geoip: bool = False,
+ custom_config: Optional[Dict] = None,
+ additional_args: Optional[Dict] = None,
  ) -> Response:
  """
  Opens up a browser and do your request based on your chosen options below.

  :param url: Target url.
- :param headless: Run the browser in headless/hidden (default), 'virtual' screen mode, or headful/visible mode.
+ :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
  :param block_images: Prevent the loading of images through Firefox preferences.
  This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
- :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
+ :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends, but it made requests ~25% faster in my tests for some websites.
  Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
  This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
  :param block_webrtc: Blocks WebRTC entirely.
+ :param cookies: Set cookies for the next request.
  :param addons: List of Firefox addons to use. Must be paths to extracted addons.
- :param disable_ads: Disabled by default, this installs `uBlock Origin` addon on the browser if enabled.
  :param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
- :param allow_webgl: Enabled by default. Disabling it WebGL not recommended as many WAFs now checks if WebGL is enabled.
- :param geoip: Recommended to use with proxies; Automatically use IP's longitude, latitude, timezone, country, locale, & spoof the WebRTC IP address.
- It will also calculate and spoof the browser's language based on the distribution of language speakers in the target region.
+ :param solve_cloudflare: Solves all 3 types of Cloudflare's Turnstile wait page before returning the response to you.
+ :param allow_webgl: Enabled by default. Disabling WebGL is not recommended as many WAFs now check if WebGL is enabled.
  :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
+ :param disable_ads: Disabled by default, this installs the `uBlock Origin` addon on the browser if enabled.
  :param os_randomize: If enabled, Scrapling will randomize the OS fingerprints used. The default is Scrapling matching the fingerprints with the current OS.
- :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
- :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning `Response` object.
+ :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the `Response` object.
+ :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000.
  :param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
- :param wait_selector: Wait for a specific css selector to be in a specific state.
- :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
- :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
+ :param wait_selector: Wait for a specific CSS selector to be in a specific state.
+ :param geoip: Recommended to use with proxies; Automatically use IP's longitude, latitude, timezone, country, locale, and spoof the WebRTC IP address.
+ It will also calculate and spoof the browser's language based on the distribution of language speakers in the target region.
+ :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
+ :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
  :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
  :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
  :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :param additional_arguments: Additional arguments to be passed to Camoufox as additional settings and it takes higher priority than Scrapling's settings.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+ :param additional_args: Additional arguments to be passed to Camoufox as additional settings, and it takes higher priority than Scrapling's settings.
+ :return: A `Response` object.
  """
  if not custom_config:
  custom_config = {}
  elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
+ raise ValueError(
+ f"The custom parser config must be of type dictionary, got {cls.__class__}"
+ )

- engine = CamoufoxEngine(
+ async with AsyncStealthySession(
  wait=wait,
+ max_pages=1,
  proxy=proxy,
  geoip=geoip,
  addons=addons,
  timeout=timeout,
+ cookies=cookies,
  headless=headless,
  humanize=humanize,
  disable_ads=disable_ads,
@@ -361,82 +230,99 @@ class StealthyFetcher(BaseFetcher):
  wait_selector=wait_selector,
  google_search=google_search,
  extra_headers=extra_headers,
+ solve_cloudflare=solve_cloudflare,
  disable_resources=disable_resources,
  wait_selector_state=wait_selector_state,
- adaptor_arguments={**cls._generate_parser_arguments(), **custom_config},
- additional_arguments=additional_arguments or {}
- )
- return await engine.async_fetch(url)
+ selector_config={**cls._generate_parser_arguments(), **custom_config},
+ additional_args=additional_args or {},
+ ) as engine:
+ return await engine.fetch(url)


- class PlayWrightFetcher(BaseFetcher):
+ class DynamicFetcher(BaseFetcher):
  """A `Fetcher` class type that provide many options, all of them are based on PlayWright.

  Using this Fetcher class, you can do requests with:
  - Vanilla Playwright without any modifications other than the ones you chose.
- - Stealthy Playwright with the stealth mode I wrote for it. It's still a work in progress but it bypasses many online tests like bot.sannysoft.com
+ - Stealthy Playwright with the stealth mode I wrote for it. It's still a work in progress, but it bypasses many online tests like bot.sannysoft.com
  Some of the things stealth mode does include:
  1) Patches the CDP runtime fingerprint.
  2) Mimics some of the real browsers' properties by injecting several JS files and using custom options.
  3) Using custom flags on launch to hide Playwright even more and make it faster.
- 4) Generates real browser's headers of the same type and same user OS then append it to the request.
- - Real browsers by passing the `real_chrome` argument or the CDP URL of your browser to be controlled by the Fetcher and most of the options can be enabled on it.
+ 4) Generates real browser's headers of the same type and same user OS, then append it to the request.
+ - Real browsers by passing the `real_chrome` argument or the CDP URL of your browser to be controlled by the Fetcher, and most of the options can be enabled on it.
  - NSTBrowser's docker browserless option by passing the CDP URL and enabling `nstbrowser_mode` option.

- > Note that these are the main options with PlayWright but it can be mixed together.
+ > Note that these are the main options with PlayWright, but it can be mixed.
  """
+
  @classmethod
  def fetch(
- cls, url: str, headless: Union[bool, str] = True, disable_resources: bool = None,
- useragent: Optional[str] = None, network_idle: bool = False, timeout: Optional[float] = 30000, wait: Optional[int] = 0,
- page_action: Optional[Callable] = None, wait_selector: Optional[str] = None, wait_selector_state: SelectorWaitStates = 'attached',
- hide_canvas: bool = False, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: bool = True,
- proxy: Optional[Union[str, Dict[str, str]]] = None, locale: Optional[str] = 'en-US',
- stealth: bool = False, real_chrome: bool = False,
- cdp_url: Optional[str] = None,
- nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
- custom_config: Dict = None
+ cls,
+ url: str,
+ headless: bool = True,
+ google_search: bool = True,
+ hide_canvas: bool = False,
+ disable_webgl: bool = False,
+ real_chrome: bool = False,
+ stealth: bool = False,
+ wait: int | float = 0,
+ page_action: Optional[Callable] = None,
+ proxy: Optional[str | Dict[str, str]] = None,
+ locale: str = "en-US",
+ extra_headers: Optional[Dict[str, str]] = None,
+ useragent: Optional[str] = None,
+ cdp_url: Optional[str] = None,
+ timeout: int | float = 30000,
+ disable_resources: bool = False,
+ wait_selector: Optional[str] = None,
+ cookies: Optional[Iterable[Dict]] = None,
+ network_idle: bool = False,
+ wait_selector_state: SelectorWaitStates = "attached",
+ custom_config: Optional[Dict] = None,
  ) -> Response:
  """Opens up a browser and do your request based on your chosen options below.

  :param url: Target url.
  :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
- :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
+ :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends, but it made requests ~25% faster in my tests for some websites.
  Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
  This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
  :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
+ :param cookies: Set cookies for the next request.
  :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
- :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000.
- :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning `Response` object.
- :param locale: Set the locale for the browser if wanted. The default value is `en-US`.
+ :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000.
+ :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the `Response` object.
  :param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
- :param wait_selector: Wait for a specific css selector to be in a specific state.
- :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
+ :param wait_selector: Wait for a specific CSS selector to be in a specific state.
+ :param locale: Set the locale for the browser if wanted. The default value is `en-US`.
+ :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
  :param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
- :param real_chrome: If you have chrome browser installed on your device, enable this and the Fetcher will launch an instance of your browser and use it.
+ :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
  :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
  :param disable_webgl: Disables WebGL and WebGL 2.0 support entirely.
- :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
+ :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
+ :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
  :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
  :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
- :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
- :param nstbrowser_mode: Enables NSTBrowser mode, it have to be used with `cdp_url` argument or it will get completely ignored.
- :param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
  :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+ :return: A `Response` object.
  """
  if not custom_config:
  custom_config = {}
  elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
+ raise ValueError(
+ f"The custom parser config must be of type dictionary, got {cls.__class__}"
+ )

- engine = PlaywrightEngine(
+ with DynamicSession(
  wait=wait,
  proxy=proxy,
  locale=locale,
  timeout=timeout,
  stealth=stealth,
  cdp_url=cdp_url,
+ cookies=cookies,
  headless=headless,
  useragent=useragent,
  real_chrome=real_chrome,
@@ -447,68 +333,82 @@ class PlayWrightFetcher(BaseFetcher):
  extra_headers=extra_headers,
  wait_selector=wait_selector,
  disable_webgl=disable_webgl,
- nstbrowser_mode=nstbrowser_mode,
- nstbrowser_config=nstbrowser_config,
  disable_resources=disable_resources,
  wait_selector_state=wait_selector_state,
- adaptor_arguments={**cls._generate_parser_arguments(), **custom_config},
- )
- return engine.fetch(url)
+ selector_config={**cls._generate_parser_arguments(), **custom_config},
+ ) as session:
+ return session.fetch(url)

  @classmethod
  async def async_fetch(
- cls, url: str, headless: Union[bool, str] = True, disable_resources: bool = None,
- useragent: Optional[str] = None, network_idle: bool = False, timeout: Optional[float] = 30000, wait: Optional[int] = 0,
- page_action: Optional[Callable] = None, wait_selector: Optional[str] = None, wait_selector_state: SelectorWaitStates = 'attached',
- hide_canvas: bool = False, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: bool = True,
- proxy: Optional[Union[str, Dict[str, str]]] = None, locale: Optional[str] = 'en-US',
- stealth: bool = False, real_chrome: bool = False,
- cdp_url: Optional[str] = None,
- nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
- custom_config: Dict = None
+ cls,
+ url: str,
+ headless: bool = True,
+ google_search: bool = True,
+ hide_canvas: bool = False,
+ disable_webgl: bool = False,
+ real_chrome: bool = False,
+ stealth: bool = False,
+ wait: int | float = 0,
+ page_action: Optional[Callable] = None,
+ proxy: Optional[str | Dict[str, str]] = None,
+ locale: str = "en-US",
+ extra_headers: Optional[Dict[str, str]] = None,
+ useragent: Optional[str] = None,
+ cdp_url: Optional[str] = None,
+ timeout: int | float = 30000,
+ disable_resources: bool = False,
+ wait_selector: Optional[str] = None,
+ cookies: Optional[Iterable[Dict]] = None,
+ network_idle: bool = False,
+ wait_selector_state: SelectorWaitStates = "attached",
+ custom_config: Optional[Dict] = None,
  ) -> Response:
  """Opens up a browser and do your request based on your chosen options below.

  :param url: Target url.
  :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
- :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
+ :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends, but it made requests ~25% faster in my tests for some websites.
  Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
  This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
  :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
+ :param cookies: Set cookies for the next request.
  :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
- :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000.
- :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning `Response` object.
- :param locale: Set the locale for the browser if wanted. The default value is `en-US`.
+ :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000.
+ :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the `Response` object.
  :param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
- :param wait_selector: Wait for a specific css selector to be in a specific state.
- :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
+ :param wait_selector: Wait for a specific CSS selector to be in a specific state.
+ :param locale: Set the locale for the browser if wanted. The default value is `en-US`.
+ :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
  :param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
- :param real_chrome: If you have chrome browser installed on your device, enable this and the Fetcher will launch an instance of your browser and use it.
+ :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
  :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
  :param disable_webgl: Disables WebGL and WebGL 2.0 support entirely.
- :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
+ :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
+ :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
  :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
  :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
- :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
- :param nstbrowser_mode: Enables NSTBrowser mode, it have to be used with `cdp_url` argument or it will get completely ignored.
- :param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
  :param custom_config: A dictionary of custom parser arguments to use with this request. Any argument passed will override any class parameters values.
- :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+ :return: A `Response` object.
  """
  if not custom_config:
  custom_config = {}
  elif not isinstance(custom_config, dict):
- ValueError(f"The custom parser config must be of type dictionary, got {cls.__class__}")
+ raise ValueError(
+ f"The custom parser config must be of type dictionary, got {cls.__class__}"
+ )

- engine = PlaywrightEngine(
+ async with AsyncDynamicSession(
  wait=wait,
  proxy=proxy,
  locale=locale,
  timeout=timeout,
  stealth=stealth,
  cdp_url=cdp_url,
+ cookies=cookies,
  headless=headless,
  useragent=useragent,
+ max_pages=1,
  real_chrome=real_chrome,
  page_action=page_action,
  hide_canvas=hide_canvas,
@@ -517,17 +417,11 @@ class PlayWrightFetcher(BaseFetcher):
  extra_headers=extra_headers,
  wait_selector=wait_selector,
  disable_webgl=disable_webgl,
- nstbrowser_mode=nstbrowser_mode,
- nstbrowser_config=nstbrowser_config,
  disable_resources=disable_resources,
  wait_selector_state=wait_selector_state,
- adaptor_arguments={**cls._generate_parser_arguments(), **custom_config},
- )
- return await engine.async_fetch(url)
+ selector_config={**cls._generate_parser_arguments(), **custom_config},
+ ) as session:
+ return await session.fetch(url)


- class CustomFetcher(BaseFetcher):
- @classmethod
- def fetch(cls, url: str, browser_engine, **kwargs) -> Response:
- engine = check_if_engine_usable(browser_engine)(adaptor_arguments=cls._generate_parser_arguments(), **kwargs)
- return engine.fetch(url)
+ PlayWrightFetcher = DynamicFetcher  # For backward-compatibility
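
Taken together, the diff above leaves `scrapling/fetchers.py` with a `curl_cffi`-backed `Fetcher`/`AsyncFetcher` whose `get`/`post`/`put`/`delete` delegate to shared client instances, a `StealthyFetcher` that now runs through `StealthySession`/`AsyncStealthySession` and gained `solve_cloudflare` and `cookies`, and `DynamicFetcher` replacing `PlayWrightFetcher` with the old name kept as an alias. The sketch below is illustrative only: it relies solely on the signatures visible in this diff, assumes the classes are importable from `scrapling.fetchers` as the file path suggests, and the cookie dict shape is an assumption borrowed from Playwright's convention, not something this diff confirms.

# Hypothetical usage sketch of the 0.3 API surface shown in this diff.
from scrapling.fetchers import Fetcher, StealthyFetcher, DynamicFetcher, PlayWrightFetcher

# Fetcher.get/post/put/delete are now bound methods of a shared FetcherClient
# instance (curl_cffi-based) instead of classmethods that built a StaticEngine per call.
page = Fetcher.get("https://example.com")
print(page.status)

# StealthyFetcher.fetch() opens a one-page StealthySession under the hood;
# `solve_cloudflare` and `cookies` are new parameters in this release.
page = StealthyFetcher.fetch(
    "https://example.com",
    solve_cloudflare=True,
    cookies=[{"name": "session", "value": "abc123", "domain": "example.com"}],  # assumed cookie dict shape
)

# The Playwright-based fetcher was renamed; the old name still resolves to the same class.
assert PlayWrightFetcher is DynamicFetcher
page = DynamicFetcher.fetch("https://example.com", network_idle=True)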