scrapling 0.2.8__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- scrapling/__init__.py +4 -4
- scrapling/core/custom_types.py +88 -6
- scrapling/core/storage_adaptors.py +5 -6
- scrapling/core/translator.py +2 -2
- scrapling/core/utils.py +29 -27
- scrapling/defaults.py +2 -1
- scrapling/engines/camo.py +89 -15
- scrapling/engines/constants.py +4 -4
- scrapling/engines/pw.py +158 -83
- scrapling/engines/static.py +91 -48
- scrapling/engines/toolbelt/__init__.py +3 -3
- scrapling/engines/toolbelt/custom.py +20 -22
- scrapling/engines/toolbelt/fingerprints.py +3 -3
- scrapling/engines/toolbelt/navigation.py +21 -8
- scrapling/fetchers.py +229 -14
- scrapling/parser.py +49 -21
- {scrapling-0.2.8.dist-info → scrapling-0.2.9.dist-info}/METADATA +32 -16
- scrapling-0.2.9.dist-info/RECORD +47 -0
- tests/fetchers/async/__init__.py +0 -0
- tests/fetchers/async/test_camoufox.py +95 -0
- tests/fetchers/async/test_httpx.py +83 -0
- tests/fetchers/async/test_playwright.py +99 -0
- tests/fetchers/sync/__init__.py +0 -0
- tests/fetchers/sync/test_camoufox.py +68 -0
- tests/fetchers/sync/test_httpx.py +82 -0
- tests/fetchers/sync/test_playwright.py +87 -0
- tests/fetchers/test_utils.py +90 -122
- tests/parser/test_automatch.py +64 -9
- tests/parser/test_general.py +260 -218
- scrapling-0.2.8.dist-info/RECORD +0 -42
- tests/fetchers/test_camoufox.py +0 -65
- tests/fetchers/test_httpx.py +0 -68
- tests/fetchers/test_playwright.py +0 -77
- {scrapling-0.2.8.dist-info → scrapling-0.2.9.dist-info}/LICENSE +0 -0
- {scrapling-0.2.8.dist-info → scrapling-0.2.9.dist-info}/WHEEL +0 -0
- {scrapling-0.2.8.dist-info → scrapling-0.2.9.dist-info}/top_level.txt +0 -0
scrapling/engines/pw.py
CHANGED
@@ -1,12 +1,13 @@
 import json
-import logging
 
-from scrapling.core._types import Callable, Dict,
+from scrapling.core._types import Callable, Dict, Optional, Union
+from scrapling.core.utils import log, lru_cache
 from scrapling.engines.constants import (DEFAULT_STEALTH_FLAGS,
                                          NSTBROWSER_DEFAULT_QUERY)
 from scrapling.engines.toolbelt import (Response, StatusText,
+                                        async_intercept_route,
                                         check_type_validity, construct_cdp_url,
-                                        construct_proxy_dict,
+                                        construct_proxy_dict,
                                         generate_convincing_referer,
                                         generate_headers, intercept_route,
                                         js_bypass_path)
@@ -19,7 +20,7 @@ class PlaywrightEngine:
                 useragent: Optional[str] = None,
                 network_idle: Optional[bool] = False,
                 timeout: Optional[float] = 30000,
-                page_action: Callable =
+                page_action: Callable = None,
                 wait_selector: Optional[str] = None,
                 locale: Optional[str] = 'en-US',
                 wait_selector_state: Optional[str] = 'attached',
@@ -74,11 +75,14 @@ class PlaywrightEngine:
         self.cdp_url = cdp_url
         self.useragent = useragent
         self.timeout = check_type_validity(timeout, [int, float], 30000)
-        if
-
+        if page_action is not None:
+            if callable(page_action):
+                self.page_action = page_action
+            else:
+                self.page_action = None
+                log.error('[Ignored] Argument "page_action" must be callable')
         else:
-            self.page_action =
-            logging.error('[Ignored] Argument "page_action" must be callable')
+            self.page_action = None
 
         self.wait_selector = wait_selector
         self.wait_selector_state = wait_selector_state
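Note on the new `page_action` handling: the default is now `None`, and anything non-callable is rejected at construction time instead of silently replacing a do-nothing default. A minimal usage sketch of the new contract (the scroll helper is hypothetical and other constructor kwargs are trimmed):

    from scrapling.engines.pw import PlaywrightEngine

    def scroll_down(page):
        # The engine calls page_action with the Playwright page object and
        # keeps whatever the callable returns
        page.mouse.wheel(0, 2000)
        return page

    engine = PlaywrightEngine(page_action=scroll_down)
    # A non-callable is ignored: page_action falls back to None and
    # '[Ignored] Argument "page_action" must be callable' is logged
    ignored = PlaywrightEngine(page_action="not-a-function")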
@@ -94,10 +98,8 @@ class PlaywrightEngine:
             # '--disable-extensions',
         ]
 
-    def _cdp_url_logic(self
+    def _cdp_url_logic(self) -> str:
         """Constructs new CDP URL if NSTBrowser is enabled otherwise return CDP URL as it is
-
-        :param flags: Chrome flags to be added to NSTBrowser query
         :return: CDP URL
         """
         cdp_url = self.cdp_url
@@ -106,7 +108,8 @@ class PlaywrightEngine:
                 config = self.nstbrowser_config
             else:
                 query = NSTBROWSER_DEFAULT_QUERY.copy()
-                if
+                if self.stealth:
+                    flags = self.__set_flags()
                 query.update({
                     "args": dict(zip(flags, [''] * len(flags))),  # browser args should be a dictionary
                 })
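The `dict(zip(...))` line is the one subtlety here: NSTBrowser takes browser args as a mapping rather than a flag list, so every flag is paired with an empty value. The same expression in isolation:

    flags = ('--disable-webgl', '--fingerprinting-canvas-image-data-noise')  # example values
    # NSTBrowser expects {"--flag": ""} pairs instead of a plain list of flags
    args = dict(zip(flags, [''] * len(flags)))
    assert args == {'--disable-webgl': '', '--fingerprinting-canvas-image-data-noise': ''}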
@@ -122,6 +125,68 @@ class PlaywrightEngine:
 
         return cdp_url
 
+    @lru_cache(typed=True)
+    def __set_flags(self):
+        """Returns the flags that will be used while launching the browser if stealth mode is enabled"""
+        flags = DEFAULT_STEALTH_FLAGS
+        if self.hide_canvas:
+            flags += ('--fingerprinting-canvas-image-data-noise',)
+        if self.disable_webgl:
+            flags += ('--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2',)
+
+        return flags
+
+    def __launch_kwargs(self):
+        """Creates the arguments we will use while launching playwright's browser"""
+        launch_kwargs = {'headless': self.headless, 'ignore_default_args': self.harmful_default_args, 'channel': 'chrome' if self.real_chrome else 'chromium'}
+        if self.stealth:
+            launch_kwargs.update({'args': self.__set_flags(), 'chromium_sandbox': True})
+
+        return launch_kwargs
+
+    def __context_kwargs(self):
+        """Creates the arguments for the browser context"""
+        context_kwargs = {
+            "proxy": self.proxy,
+            "locale": self.locale,
+            "color_scheme": 'dark',  # Bypasses the 'prefersLightColor' check in creepjs
+            "device_scale_factor": 2,
+            "extra_http_headers": self.extra_headers if self.extra_headers else {},
+            "user_agent": self.useragent if self.useragent else generate_headers(browser_mode=True).get('User-Agent'),
+        }
+        if self.stealth:
+            context_kwargs.update({
+                'is_mobile': False,
+                'has_touch': False,
+                # I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
+                'service_workers': 'allow',
+                'ignore_https_errors': True,
+                'screen': {'width': 1920, 'height': 1080},
+                'viewport': {'width': 1920, 'height': 1080},
+                'permissions': ['geolocation', 'notifications']
+            })
+
+        return context_kwargs
+
+    @lru_cache()
+    def __stealth_scripts(self):
+        # Basic bypasses nothing fancy as I'm still working on it
+        # But with adding these bypasses to the above config, it bypasses many online tests like
+        # https://bot.sannysoft.com/
+        # https://kaliiiiiiiiii.github.io/brotector/
+        # https://pixelscan.net/
+        # https://iphey.com/
+        # https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
+        # https://arh.antoinevastel.com/bots/areyouheadless/
+        # https://prescience-data.github.io/execution-monitor.html
+        return tuple(
+            js_bypass_path(script) for script in (
+                # Order is important
+                'webdriver_fully.js', 'window_chrome.js', 'navigator_plugins.js', 'pdf_viewer.js',
+                'notification_permission.js', 'screen_props.js', 'playwright_fingerprint.js'
+            )
+        )
+
     def fetch(self, url: str) -> Response:
         """Opens up the browser and do your request based on your chosen options.
 
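Two details worth calling out in these new helpers: `@lru_cache` on a method keys the cache on `self` (each engine instance gets its own cached result, and the cache keeps the instance alive), and the flags are now built as tuples rather than the lists used in 0.2.8, since a cached value should be immutable. A standalone sketch of the same pattern, with trimmed example flag values:

    from functools import lru_cache

    DEFAULT_STEALTH_FLAGS = ('--no-first-run', '--no-default-browser-check')  # trimmed example values

    class Engine:
        def __init__(self, hide_canvas: bool = True, disable_webgl: bool = False):
            self.hide_canvas = hide_canvas
            self.disable_webgl = disable_webgl

        @lru_cache(typed=True)
        def _set_flags(self):
            # Tuple concatenation returns a new immutable object, so the cached
            # result can never be mutated behind the cache's back
            flags = DEFAULT_STEALTH_FLAGS
            if self.hide_canvas:
                flags += ('--fingerprinting-canvas-image-data-noise',)
            if self.disable_webgl:
                flags += ('--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2')
            return flags

    engine = Engine()
    assert engine._set_flags() is engine._set_flags()  # second call is a cache hit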
@@ -135,61 +200,14 @@ class PlaywrightEngine:
             from rebrowser_playwright.sync_api import sync_playwright
 
         with sync_playwright() as p:
-            # Handle the UserAgent early
-            if self.useragent:
-                extra_headers = {}
-                useragent = self.useragent
-            else:
-                extra_headers = {}
-                useragent = generate_headers(browser_mode=True).get('User-Agent')
-
-            # Prepare the flags before diving
-            flags = DEFAULT_STEALTH_FLAGS
-            if self.hide_canvas:
-                flags += ['--fingerprinting-canvas-image-data-noise']
-            if self.disable_webgl:
-                flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
-
             # Creating the browser
             if self.cdp_url:
-                cdp_url = self._cdp_url_logic(
+                cdp_url = self._cdp_url_logic()
                 browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
             else:
-
-                browser = p.chromium.launch(
-                    headless=self.headless, args=flags, ignore_default_args=self.harmful_default_args, chromium_sandbox=True, channel='chrome' if self.real_chrome else 'chromium'
-                )
-            else:
-                browser = p.chromium.launch(headless=self.headless, ignore_default_args=self.harmful_default_args, channel='chrome' if self.real_chrome else 'chromium')
-
-            # Creating the context
-            if self.stealth:
-                context = browser.new_context(
-                    locale=self.locale,
-                    is_mobile=False,
-                    has_touch=False,
-                    proxy=self.proxy,
-                    color_scheme='dark',  # Bypasses the 'prefersLightColor' check in creepjs
-                    user_agent=useragent,
-                    device_scale_factor=2,
-                    # I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
-                    service_workers="allow",
-                    ignore_https_errors=True,
-                    extra_http_headers=extra_headers,
-                    screen={"width": 1920, "height": 1080},
-                    viewport={"width": 1920, "height": 1080},
-                    permissions=["geolocation", 'notifications'],
-                )
-            else:
-                context = browser.new_context(
-                    locale=self.locale,
-                    proxy=self.proxy,
-                    color_scheme='dark',
-                    user_agent=useragent,
-                    device_scale_factor=2,
-                    extra_http_headers=extra_headers
-                )
+                browser = p.chromium.launch(**self.__launch_kwargs())
 
+            context = browser.new_context(**self.__context_kwargs())
             # Finally we are in business
             page = context.new_page()
             page.set_default_navigation_timeout(self.timeout)
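This is the payoff of the helper refactor: roughly fifty lines of inline setup collapse into two calls, and the same kwarg builders can feed the sync and async paths alike through `**` expansion. The shape of the idea in isolation (a trimmed, hypothetical version of `__launch_kwargs`):

    def launch_kwargs(headless: bool = True, real_chrome: bool = False, stealth: bool = False) -> dict:
        # One dict drives both playwright.sync_api and playwright.async_api launches
        kwargs = {'headless': headless, 'channel': 'chrome' if real_chrome else 'chromium'}
        if stealth:
            kwargs.update({'chromium_sandbox': True})
        return kwargs

    # Both call sites stay identical apart from `await`:
    #   browser = p.chromium.launch(**launch_kwargs(...))          # sync
    #   browser = await p.chromium.launch(**launch_kwargs(...))    # async
    print(launch_kwargs(stealth=True))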
@@ -202,29 +220,16 @@ class PlaywrightEngine:
                 page.route("**/*", intercept_route)
 
             if self.stealth:
-
-
-                # https://bot.sannysoft.com/
-                # https://kaliiiiiiiiii.github.io/brotector/
-                # https://pixelscan.net/
-                # https://iphey.com/
-                # https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
-                # https://arh.antoinevastel.com/bots/areyouheadless/
-                # https://prescience-data.github.io/execution-monitor.html
-                page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
-                page.add_init_script(path=js_bypass_path('window_chrome.js'))
-                page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
-                page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
-                page.add_init_script(path=js_bypass_path('notification_permission.js'))
-                page.add_init_script(path=js_bypass_path('screen_props.js'))
-                page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
+                for script in self.__stealth_scripts():
+                    page.add_init_script(path=script)
 
             res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
             page.wait_for_load_state(state="domcontentloaded")
             if self.network_idle:
                 page.wait_for_load_state('networkidle')
 
-
+            if self.page_action is not None:
+                page = self.page_action(page)
 
             if self.wait_selector and type(self.wait_selector) is str:
                 waiter = page.locator(self.wait_selector)
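Same consolidation for the init scripts: seven repeated `add_init_script` calls become one loop over the cached tuple, preserving the order the original comment says matters. The equivalent standalone Playwright pattern (script paths hypothetical):

    from playwright.sync_api import sync_playwright

    STEALTH_SCRIPTS = ('bypasses/webdriver_fully.js', 'bypasses/window_chrome.js')  # hypothetical paths

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        page = browser.new_page()
        for script in STEALTH_SCRIPTS:
            # Injected before any page script runs, on every navigation of this page
            page.add_init_script(path=script)
        page.goto('https://example.com')
        browser.close()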
@@ -237,11 +242,8 @@ class PlaywrightEngine:
 
             # This will be parsed inside `Response`
             encoding = res.headers.get('content-type', '') or 'utf-8'  # default encoding
-
-            status_text = res.status_text
             # PlayWright API sometimes give empty status text for some reason!
-
-            status_text = StatusText.get(res.status)
+            status_text = res.status_text or StatusText.get(res.status)
 
             response = Response(
                 url=res.url,
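The two-step assignment folds into a single `or` expression: an empty status text is falsy, so the `StatusText` lookup kicks in exactly when Playwright hands back an empty string. Behavior sketch with a hypothetical stand-in for the real `StatusText` table:

    class StatusText:
        # Hypothetical stand-in for scrapling's StatusText helper
        _mapping = {200: 'OK', 404: 'Not Found'}

        @classmethod
        def get(cls, status_code: int) -> str:
            return cls._mapping.get(status_code, 'Unknown')

    assert ('' or StatusText.get(200)) == 'OK'                  # empty text -> fallback
    assert ('Not Found' or StatusText.get(404)) == 'Not Found'  # real text wins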
@@ -257,3 +259,76 @@ class PlaywrightEngine:
             )
             page.close()
             return response
+
+    async def async_fetch(self, url: str) -> Response:
+        """Async version of `fetch`
+
+        :param url: Target url.
+        :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+        """
+        if not self.stealth or self.real_chrome:
+            # Because rebrowser_playwright doesn't play well with real browsers
+            from playwright.async_api import async_playwright
+        else:
+            from rebrowser_playwright.async_api import async_playwright
+
+        async with async_playwright() as p:
+            # Creating the browser
+            if self.cdp_url:
+                cdp_url = self._cdp_url_logic()
+                browser = await p.chromium.connect_over_cdp(endpoint_url=cdp_url)
+            else:
+                browser = await p.chromium.launch(**self.__launch_kwargs())
+
+            context = await browser.new_context(**self.__context_kwargs())
+            # Finally we are in business
+            page = await context.new_page()
+            page.set_default_navigation_timeout(self.timeout)
+            page.set_default_timeout(self.timeout)
+
+            if self.extra_headers:
+                await page.set_extra_http_headers(self.extra_headers)
+
+            if self.disable_resources:
+                await page.route("**/*", async_intercept_route)
+
+            if self.stealth:
+                for script in self.__stealth_scripts():
+                    await page.add_init_script(path=script)
+
+            res = await page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
+            await page.wait_for_load_state(state="domcontentloaded")
+            if self.network_idle:
+                await page.wait_for_load_state('networkidle')
+
+            if self.page_action is not None:
+                page = await self.page_action(page)
+
+            if self.wait_selector and type(self.wait_selector) is str:
+                waiter = page.locator(self.wait_selector)
+                await waiter.first.wait_for(state=self.wait_selector_state)
+                # Wait again after waiting for the selector, helpful with protections like Cloudflare
+                await page.wait_for_load_state(state="load")
+                await page.wait_for_load_state(state="domcontentloaded")
+                if self.network_idle:
+                    await page.wait_for_load_state('networkidle')
+
+            # This will be parsed inside `Response`
+            encoding = res.headers.get('content-type', '') or 'utf-8'  # default encoding
+            # PlayWright API sometimes give empty status text for some reason!
+            status_text = res.status_text or StatusText.get(res.status)
+
+            response = Response(
+                url=res.url,
+                text=await page.content(),
+                body=(await page.content()).encode('utf-8'),
+                status=res.status,
+                reason=status_text,
+                encoding=encoding,
+                cookies={cookie['name']: cookie['value'] for cookie in await page.context.cookies()},
+                headers=await res.all_headers(),
+                request_headers=await res.request.all_headers(),
+                **self.adaptor_arguments
+            )
+            await page.close()
+            return response
scrapling/engines/static.py
CHANGED
@@ -1,34 +1,44 @@
-import logging
-
 import httpx
 from httpx._models import Response as httpxResponse
 
-from scrapling.core._types import Dict, Optional, Union
+from scrapling.core._types import Dict, Optional, Tuple, Union
+from scrapling.core.utils import log, lru_cache
 
 from .toolbelt import Response, generate_convincing_referer, generate_headers
 
 
+@lru_cache(typed=True)
 class StaticEngine:
-    def __init__(
+    def __init__(
+            self, url: str, proxy: Optional[str] = None, stealthy_headers: Optional[bool] = True, follow_redirects: bool = True,
+            timeout: Optional[Union[int, float]] = None, retries: Optional[int] = 3, adaptor_arguments: Tuple = None
+    ):
         """An engine that utilizes httpx library, check the `Fetcher` class for more documentation.
 
+        :param url: Target url.
+        :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
+            create a referer header as if this request had came from Google's search of this URL's domain.
+        :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
         :param follow_redirects: As the name says -- if enabled (default), redirects will be followed.
         :param timeout: The time to wait for the request to finish in seconds. The default is 10 seconds.
         :param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
         """
+        self.url = url
+        self.proxy = proxy
+        self.stealth = stealthy_headers
         self.timeout = timeout
         self.follow_redirects = bool(follow_redirects)
+        self.retries = retries
         self._extra_headers = generate_headers(browser_mode=False)
-
+        # Because we are using `lru_cache` for a slight optimization but both dict/dict_items are not hashable so they can't be cached
+        # So my solution here was to convert it to tuple then convert it back to dictionary again here as tuples are hashable, ofc `tuple().__hash__()`
+        self.adaptor_arguments = dict(adaptor_arguments) if adaptor_arguments else {}
 
-
-    def _headers_job(headers: Optional[Dict], url: str, stealth: bool) -> Dict:
+    def _headers_job(self, headers: Optional[Dict]) -> Dict:
         """Adds useragent to headers if it doesn't exist, generates real headers and append it to current headers, and
         finally generates a referer header that looks like if this request came from Google's search of the current URL's domain.
 
         :param headers: Current headers in the request if the user passed any
-        :param url: The Target URL.
-        :param stealth: Whether stealth mode is enabled or not.
         :return: A dictionary of the new headers.
         """
         headers = headers or {}
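The tuple round trip described in the new comment exists because `@lru_cache(typed=True)` now wraps the whole class, so identical constructor arguments return the same cached engine, and every argument must therefore be hashable. In isolation:

    adaptor_arguments = {'huge_tree': True}  # hypothetical example values
    # dicts are unhashable and can't be part of an lru_cache key ...
    hashable_form = tuple(adaptor_arguments.items())
    _ = hash(hashable_form)  # fine; hash(adaptor_arguments) would raise TypeError
    # ... and the dict is rebuilt losslessly inside __init__
    assert dict(hashable_form) == adaptor_arguments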
@@ -36,12 +46,12 @@ class StaticEngine:
         # Validate headers
         if not headers.get('user-agent') and not headers.get('User-Agent'):
             headers['User-Agent'] = generate_headers(browser_mode=False).get('User-Agent')
-
+            log.debug(f"Can't find useragent in headers so '{headers['User-Agent']}' was used.")
 
-        if stealth:
+        if self.stealth:
             extra_headers = generate_headers(browser_mode=False)
             headers.update(extra_headers)
-            headers.update({'referer': generate_convincing_referer(url)})
+            headers.update({'referer': generate_convincing_referer(self.url)})
 
         return headers
 
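`_headers_job` now reads the URL and the stealth toggle from `self` instead of taking them as parameters. The referer it injects is the one the docstring describes, a Google search for the target's domain; a hypothetical re-implementation of just that idea:

    from urllib.parse import urlparse

    def convincing_referer(url: str) -> str:
        # Sketch of the idea behind generate_convincing_referer: pretend the
        # request arrived from a Google search for the target's domain
        domain = urlparse(url).netloc
        return f'https://www.google.com/search?q={domain}'

    print(convincing_referer('https://books.toscrape.com/catalogue/page-2.html'))
    # https://www.google.com/search?q=books.toscrape.com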
@@ -61,69 +71,102 @@ class StaticEngine:
             cookies=dict(response.cookies),
             headers=dict(response.headers),
             request_headers=dict(response.request.headers),
+            method=response.request.method,
             **self.adaptor_arguments
         )
 
-    def get(self,
+    def get(self, **kwargs: Dict) -> Response:
         """Make basic HTTP GET request for you but with some added flavors.
 
-        :param
-        :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
-            create a referer header as if this request had came from Google's search of this URL's domain.
-        :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
-        :param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
+        :param kwargs: Any keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
         :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
         """
-        headers = self._headers_job(kwargs.pop('headers', {})
-        with httpx.Client(proxy=proxy) as client:
-            request = client.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        with httpx.Client(proxy=self.proxy, transport=httpx.HTTPTransport(retries=self.retries)) as client:
+            request = client.get(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
 
         return self._prepare_response(request)
 
-    def
+    async def async_get(self, **kwargs: Dict) -> Response:
+        """Make basic async HTTP GET request for you but with some added flavors.
+
+        :param kwargs: Any keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
+        :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+        """
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        async with httpx.AsyncClient(proxy=self.proxy) as client:
+            request = await client.get(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+
+        return self._prepare_response(request)
+
+    def post(self, **kwargs: Dict) -> Response:
         """Make basic HTTP POST request for you but with some added flavors.
 
-        :param
-        :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
-            create a referer header as if this request had came from Google's search of this URL's domain.
-        :param proxy: A string of a proxy to use for http and https requests, the format accepted is `http://username:password@localhost:8030`
-        :param kwargs: Any additional keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
+        :param kwargs: Any keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
         :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
         """
-        headers = self._headers_job(kwargs.pop('headers', {})
-        with httpx.Client(proxy=proxy) as client:
-            request = client.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        with httpx.Client(proxy=self.proxy, transport=httpx.HTTPTransport(retries=self.retries)) as client:
+            request = client.post(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
 
         return self._prepare_response(request)
 
-    def
+    async def async_post(self, **kwargs: Dict) -> Response:
+        """Make basic async HTTP POST request for you but with some added flavors.
+
+        :param kwargs: Any keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
+        :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+        """
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        async with httpx.AsyncClient(proxy=self.proxy) as client:
+            request = await client.post(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+
+        return self._prepare_response(request)
+
+    def delete(self, **kwargs: Dict) -> Response:
         """Make basic HTTP DELETE request for you but with some added flavors.
 
-        :param
-        :
-
-
-
+        :param kwargs: Any keyword arguments are passed directly to `httpx.delete()` function so check httpx documentation for details.
+        :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+        """
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        with httpx.Client(proxy=self.proxy, transport=httpx.HTTPTransport(retries=self.retries)) as client:
+            request = client.delete(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+
+        return self._prepare_response(request)
+
+    async def async_delete(self, **kwargs: Dict) -> Response:
+        """Make basic async HTTP DELETE request for you but with some added flavors.
+
+        :param kwargs: Any keyword arguments are passed directly to `httpx.delete()` function so check httpx documentation for details.
         :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
         """
-        headers = self._headers_job(kwargs.pop('headers', {})
-        with httpx.
-            request = client.delete(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        async with httpx.AsyncClient(proxy=self.proxy) as client:
+            request = await client.delete(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
 
         return self._prepare_response(request)
 
-    def put(self,
+    def put(self, **kwargs: Dict) -> Response:
         """Make basic HTTP PUT request for you but with some added flavors.
 
-        :param
-        :
-
-
-
+        :param kwargs: Any keyword arguments are passed directly to `httpx.put()` function so check httpx documentation for details.
+        :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
+        """
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        with httpx.Client(proxy=self.proxy, transport=httpx.HTTPTransport(retries=self.retries)) as client:
+            request = client.put(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+
+        return self._prepare_response(request)
+
+    async def async_put(self, **kwargs: Dict) -> Response:
+        """Make basic async HTTP PUT request for you but with some added flavors.
+
+        :param kwargs: Any keyword arguments are passed directly to `httpx.put()` function so check httpx documentation for details.
         :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
         """
-        headers = self._headers_job(kwargs.pop('headers', {})
-        with httpx.
-            request = client.put(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
+        headers = self._headers_job(kwargs.pop('headers', {}))
+        async with httpx.AsyncClient(proxy=self.proxy) as client:
+            request = await client.put(url=self.url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
 
         return self._prepare_response(request)
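All six sync methods now share one shape: an `httpx.Client` built with `httpx.HTTPTransport(retries=...)`, while the new async methods use a plain `httpx.AsyncClient` with no retrying transport. Worth knowing: httpx's `retries` re-attempts failed connections only, not failed requests. The sync pattern in isolation:

    import httpx

    # `retries` retries connection errors; HTTP error responses come back as-is
    transport = httpx.HTTPTransport(retries=3)
    with httpx.Client(transport=transport) as client:
        response = client.get('https://example.com', follow_redirects=True, timeout=10)
        print(response.status_code, response.reason_phrase)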
scrapling/engines/toolbelt/__init__.py
CHANGED
@@ -1,6 +1,6 @@
 from .custom import (BaseFetcher, Response, StatusText, check_if_engine_usable,
-                     check_type_validity,
+                     check_type_validity, get_variable_name)
 from .fingerprints import (generate_convincing_referer, generate_headers,
                            get_os_name)
-from .navigation import (
-    intercept_route, js_bypass_path)
+from .navigation import (async_intercept_route, construct_cdp_url,
+                         construct_proxy_dict, intercept_route, js_bypass_path)