scrapling 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff compares the contents of two package versions as publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
scrapling/__init__.py CHANGED
@@ -1,5 +1,5 @@
 __author__ = "Karim Shoair (karim.shoair@pm.me)"
-__version__ = "0.3.3"
+__version__ = "0.3.5"
 __copyright__ = "Copyright (c) 2024 Karim Shoair"
 
 
scrapling/cli.py CHANGED
@@ -32,8 +32,8 @@ def __ParseJSONData(json_string: Optional[str] = None) -> Optional[Dict[str, Any
 
     try:
         return json_loads(json_string)
-    except JSONDecodeError as e:  # pragma: no cover
-        raise ValueError(f"Invalid JSON data '{json_string}': {e}")
+    except JSONDecodeError as err:  # pragma: no cover
+        raise ValueError(f"Invalid JSON data '{json_string}': {err}")
 
 
 def __Request_and_Save(
@@ -65,8 +65,8 @@ def __ParseExtractArguments(
     for key, value in _CookieParser(cookies):
         try:
             parsed_cookies[key] = value
-        except Exception as e:
-            raise ValueError(f"Could not parse cookies '{cookies}': {e}")
+        except Exception as err:
+            raise ValueError(f"Could not parse cookies '{cookies}': {err}")
 
     parsed_json = __ParseJSONData(json)
     parsed_params = {}
scrapling/core/custom_types.py CHANGED
@@ -145,7 +145,7 @@ class TextHandler(str):
         clean_match: bool = False,
         case_sensitive: bool = True,
         check_match: Literal[False] = False,
-    ) -> "TextHandlers[TextHandler]": ...
+    ) -> "TextHandlers": ...
 
     def re(
         self,
@@ -241,7 +241,7 @@ class TextHandlers(List[TextHandler]):
         replace_entities: bool = True,
         clean_match: bool = False,
         case_sensitive: bool = True,
-    ) -> "TextHandlers[TextHandler]":
+    ) -> "TextHandlers":
         """Call the ``.re()`` method for each element in this list and return
         their results flattened as TextHandlers.
 
scrapling/core/shell.py CHANGED
@@ -201,7 +201,7 @@ class CurlParser:
             data_payload = parsed_args.data_binary  # Fallback to string
 
         elif parsed_args.data_raw is not None:
-            data_payload = parsed_args.data_raw
+            data_payload = parsed_args.data_raw.lstrip("$")
 
         elif parsed_args.data is not None:
             data_payload = parsed_args.data
@@ -318,7 +318,7 @@ def show_page_in_browser(page: Selector):  # pragma: no cover
     try:
         fd, fname = make_temp_file(prefix="scrapling_view_", suffix=".html")
         with open(fd, "w", encoding=page.encoding) as f:
-            f.write(page.body)
+            f.write(page.html_content)
 
         open_in_browser(f"file://{fname}")
    except IOError as e:
@@ -335,15 +335,25 @@ class CustomShell:
         from scrapling.fetchers import (
             Fetcher as __Fetcher,
             AsyncFetcher as __AsyncFetcher,
+            FetcherSession as __FetcherSession,
             DynamicFetcher as __DynamicFetcher,
+            DynamicSession as __DynamicSession,
+            AsyncDynamicSession as __AsyncDynamicSession,
             StealthyFetcher as __StealthyFetcher,
+            StealthySession as __StealthySession,
+            AsyncStealthySession as __AsyncStealthySession,
         )
 
         self.__InteractiveShellEmbed = __InteractiveShellEmbed
         self.__Fetcher = __Fetcher
         self.__AsyncFetcher = __AsyncFetcher
+        self.__FetcherSession = __FetcherSession
         self.__DynamicFetcher = __DynamicFetcher
+        self.__DynamicSession = __DynamicSession
+        self.__AsyncDynamicSession = __AsyncDynamicSession
         self.__StealthyFetcher = __StealthyFetcher
+        self.__StealthySession = __StealthySession
+        self.__AsyncStealthySession = __AsyncStealthySession
         self.code = code
         self.page = None
         self.pages = Selectors([])
@@ -379,9 +389,9 @@ class CustomShell:
         """Create a custom banner for the shell"""
         return f"""
 -> Available Scrapling objects:
-   - Fetcher/AsyncFetcher
-   - DynamicFetcher
-   - StealthyFetcher
+   - Fetcher/AsyncFetcher/FetcherSession
+   - DynamicFetcher/DynamicSession/AsyncDynamicSession
+   - StealthyFetcher/StealthySession/AsyncStealthySession
    - Selector
 
 -> Useful shortcuts:
@@ -449,6 +459,11 @@ Type 'exit' or press Ctrl+D to exit.
             "delete": delete,
             "Fetcher": self.__Fetcher,
             "AsyncFetcher": self.__AsyncFetcher,
+            "FetcherSession": self.__FetcherSession,
+            "DynamicSession": self.__DynamicSession,
+            "AsyncDynamicSession": self.__AsyncDynamicSession,
+            "StealthySession": self.__StealthySession,
+            "AsyncStealthySession": self.__AsyncStealthySession,
             "fetch": dynamic_fetch,
             "DynamicFetcher": self.__DynamicFetcher,
             "stealthy_fetch": stealthy_fetch,
@@ -530,7 +545,7 @@ class Convertor:
         for page in pages:
             match extraction_type:
                 case "markdown":
-                    yield cls._convert_to_markdown(page.body)
+                    yield cls._convert_to_markdown(page.html_content)
                 case "html":
                     yield page.body
                 case "text":
scrapling/engines/_browsers/_base.py CHANGED
@@ -1,4 +1,4 @@
-from time import time, sleep
+from time import time
 from asyncio import sleep as asyncio_sleep, Lock
 
 from camoufox import DefaultAddons
@@ -31,7 +31,7 @@ class SyncSession:
     def __init__(self, max_pages: int = 1):
         self.max_pages = max_pages
         self.page_pool = PagePool(max_pages)
-        self.__max_wait_for_page = 60
+        self._max_wait_for_page = 60
         self.playwright: Optional[Playwright] = None
         self.context: Optional[BrowserContext] = None
         self._closed = False
@@ -44,23 +44,7 @@ class SyncSession:
     ) -> PageInfo:  # pragma: no cover
         """Get a new page to use"""
 
-        # Close all finished pages to ensure clean state
-        self.page_pool.close_all_finished_pages()
-
-        # If we're at max capacity after cleanup, wait for busy pages to finish
-        if self.page_pool.pages_count >= self.max_pages:
-            start_time = time()
-            while time() - start_time < self.__max_wait_for_page:
-                # Wait for any pages to finish, then clean them up
-                sleep(0.05)
-                self.page_pool.close_all_finished_pages()
-                if self.page_pool.pages_count < self.max_pages:
-                    break
-            else:
-                raise TimeoutError(
-                    f"No pages finished to clear place in the pool within the {self.__max_wait_for_page}s timeout period"
-                )
-
+        # No need to check if a page is available or not in sync code because the code blocked before reaching here till the page closed, ofc.
         page = self.context.new_page()
         page.set_default_navigation_timeout(timeout)
         page.set_default_timeout(timeout)
@@ -76,11 +60,6 @@ class SyncSession:
 
         return self.page_pool.add_page(page)
 
-    @staticmethod
-    def _get_with_precedence(request_value: Any, session_value: Any, sentinel_value: object) -> Any:
-        """Get value with request-level priority over session-level"""
-        return request_value if request_value is not sentinel_value else session_value
-
     def get_pool_stats(self) -> Dict[str, int]:
         """Get statistics about the current page pool"""
         return {
@@ -105,21 +84,16 @@ class AsyncSession(SyncSession):
     ) -> PageInfo:  # pragma: no cover
         """Get a new page to use"""
         async with self._lock:
-            # Close all finished pages to ensure clean state
-            await self.page_pool.aclose_all_finished_pages()
-
             # If we're at max capacity after cleanup, wait for busy pages to finish
             if self.page_pool.pages_count >= self.max_pages:
                 start_time = time()
-                while time() - start_time < self.__max_wait_for_page:
-                    # Wait for any pages to finish, then clean them up
+                while time() - start_time < self._max_wait_for_page:
                     await asyncio_sleep(0.05)
-                    await self.page_pool.aclose_all_finished_pages()
                    if self.page_pool.pages_count < self.max_pages:
                        break
                else:
                    raise TimeoutError(
-                        f"No pages finished to clear place in the pool within the {self.__max_wait_for_page}s timeout period"
+                        f"No pages finished to clear place in the pool within the {self._max_wait_for_page}s timeout period"
                    )
 
            page = await self.context.new_page()
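The `__max_wait_for_page` to `_max_wait_for_page` rename is a real fix, not cosmetics: double-underscore attributes are name-mangled per defining class, so `self.__max_wait_for_page` written inside `AsyncSession` compiles to a lookup of `_AsyncSession__max_wait_for_page`, while `SyncSession.__init__` stored `_SyncSession__max_wait_for_page`. A standalone illustration (hypothetical `Base`/`Child`, not Scrapling's classes):

```python
class Base:
    def __init__(self):
        self.__wait = 60  # stored as _Base__wait because of name mangling


class Child(Base):
    def probe(self):
        return self.__wait  # compiled as self._Child__wait


Child().probe()
# AttributeError: 'Child' object has no attribute '_Child__wait'
```

A single leading underscore opts out of mangling, so the subclass can read the attribute its parent set.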
scrapling/engines/_browsers/_camoufox.py CHANGED
@@ -14,8 +14,9 @@ from playwright.async_api import (
     Locator as AsyncLocator,
     Page as async_Page,
 )
+from playwright._impl._errors import Error as PlaywrightError
 
-from ._validators import validate, CamoufoxConfig
+from ._validators import validate_fetch as _validate
 from ._base import SyncSession, AsyncSession, StealthySessionMixin
 from scrapling.core.utils import log
 from scrapling.core._types import (
@@ -201,20 +202,34 @@ class StealthySession(StealthySessionMixin, SyncSession):
 
         self._closed = True
 
+    @staticmethod
+    def _get_page_content(page: Page) -> str | None:
+        """
+        A workaround for Playwright issue with `page.content()` on Windows. Ref.: https://github.com/microsoft/playwright/issues/16108
+        :param page: The page to extract content from.
+        :return:
+        """
+        while True:
+            try:
+                return page.content() or ""
+            except PlaywrightError:
+                page.wait_for_timeout(1000)
+                continue
+
     def _solve_cloudflare(self, page: Page) -> None:  # pragma: no cover
         """Solve the cloudflare challenge displayed on the playwright page passed
 
         :param page: The targeted page
         :return:
         """
-        challenge_type = self._detect_cloudflare(page.content())
+        challenge_type = self._detect_cloudflare(self._get_page_content(page))
         if not challenge_type:
             log.error("No Cloudflare challenge found.")
             return
         else:
             log.info(f'The turnstile version discovered is "{challenge_type}"')
             if challenge_type == "non-interactive":
-                while "<title>Just a moment...</title>" in (page.content()):
+                while "<title>Just a moment...</title>" in (self._get_page_content(page)):
                     log.info("Waiting for Cloudflare wait page to disappear.")
                     page.wait_for_timeout(1000)
                     page.wait_for_load_state()
@@ -222,7 +237,7 @@ class StealthySession(StealthySessionMixin, SyncSession):
                 return
 
             else:
-                while "Verifying you are human." in page.content():
+                while "Verifying you are human." in self._get_page_content(page):
                     # Waiting for the verify spinner to disappear, checking every 1s if it disappeared
                     page.wait_for_timeout(500)
 
@@ -282,23 +297,22 @@ class StealthySession(StealthySessionMixin, SyncSession):
         :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
         :return: A `Response` object.
         """
-        # Validate all resolved parameters
-        params = validate(
-            dict(
-                google_search=self._get_with_precedence(google_search, self.google_search, _UNSET),
-                timeout=self._get_with_precedence(timeout, self.timeout, _UNSET),
-                wait=self._get_with_precedence(wait, self.wait, _UNSET),
-                page_action=self._get_with_precedence(page_action, self.page_action, _UNSET),
-                extra_headers=self._get_with_precedence(extra_headers, self.extra_headers, _UNSET),
-                disable_resources=self._get_with_precedence(disable_resources, self.disable_resources, _UNSET),
-                wait_selector=self._get_with_precedence(wait_selector, self.wait_selector, _UNSET),
-                wait_selector_state=self._get_with_precedence(wait_selector_state, self.wait_selector_state, _UNSET),
-                network_idle=self._get_with_precedence(network_idle, self.network_idle, _UNSET),
-                load_dom=self._get_with_precedence(load_dom, self.load_dom, _UNSET),
-                solve_cloudflare=self._get_with_precedence(solve_cloudflare, self.solve_cloudflare, _UNSET),
-                selector_config=self._get_with_precedence(selector_config, self.selector_config, _UNSET),
-            ),
-            CamoufoxConfig,
+        params = _validate(
+            [
+                ("google_search", google_search, self.google_search),
+                ("timeout", timeout, self.timeout),
+                ("wait", wait, self.wait),
+                ("page_action", page_action, self.page_action),
+                ("extra_headers", extra_headers, self.extra_headers),
+                ("disable_resources", disable_resources, self.disable_resources),
+                ("wait_selector", wait_selector, self.wait_selector),
+                ("wait_selector_state", wait_selector_state, self.wait_selector_state),
+                ("network_idle", network_idle, self.network_idle),
+                ("load_dom", load_dom, self.load_dom),
+                ("solve_cloudflare", solve_cloudflare, self.solve_cloudflare),
+                ("selector_config", selector_config, self.selector_config),
+            ],
+            _UNSET,
         )
 
         if self._closed:  # pragma: no cover
@@ -366,8 +380,9 @@ class StealthySession(StealthySessionMixin, SyncSession):
             page_info.page, first_response, final_response, params.selector_config
         )
 
-        # Mark the page as finished for next use
-        page_info.mark_finished()
+        # Close the page, to free up resources
+        page_info.page.close()
+        self.page_pool.pages.remove(page_info)
 
         return response
 
@@ -506,20 +521,34 @@ class AsyncStealthySession(StealthySessionMixin, AsyncSession):
 
         self._closed = True
 
+    @staticmethod
+    async def _get_page_content(page: async_Page) -> str | None:
+        """
+        A workaround for Playwright issue with `page.content()` on Windows. Ref.: https://github.com/microsoft/playwright/issues/16108
+        :param page: The page to extract content from.
+        :return:
+        """
+        while True:
+            try:
+                return (await page.content()) or ""
+            except PlaywrightError:
+                await page.wait_for_timeout(1000)
+                continue
+
     async def _solve_cloudflare(self, page: async_Page):
         """Solve the cloudflare challenge displayed on the playwright page passed. The async version
 
         :param page: The async targeted page
         :return:
         """
-        challenge_type = self._detect_cloudflare(await page.content())
+        challenge_type = self._detect_cloudflare(await self._get_page_content(page))
         if not challenge_type:
             log.error("No Cloudflare challenge found.")
             return
         else:
             log.info(f'The turnstile version discovered is "{challenge_type}"')
             if challenge_type == "non-interactive":  # pragma: no cover
-                while "<title>Just a moment...</title>" in (await page.content()):
+                while "<title>Just a moment...</title>" in (await self._get_page_content(page)):
                     log.info("Waiting for Cloudflare wait page to disappear.")
                     await page.wait_for_timeout(1000)
                     await page.wait_for_load_state()
@@ -527,7 +556,7 @@ class AsyncStealthySession(StealthySessionMixin, AsyncSession):
                 return
 
             else:
-                while "Verifying you are human." in (await page.content()):
+                while "Verifying you are human." in (await self._get_page_content(page)):
                     # Waiting for the verify spinner to disappear, checking every 1s if it disappeared
                     await page.wait_for_timeout(500)
 
@@ -587,22 +616,22 @@ class AsyncStealthySession(StealthySessionMixin, AsyncSession):
         :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
         :return: A `Response` object.
         """
-        params = validate(
-            dict(
-                google_search=self._get_with_precedence(google_search, self.google_search, _UNSET),
-                timeout=self._get_with_precedence(timeout, self.timeout, _UNSET),
-                wait=self._get_with_precedence(wait, self.wait, _UNSET),
-                page_action=self._get_with_precedence(page_action, self.page_action, _UNSET),
-                extra_headers=self._get_with_precedence(extra_headers, self.extra_headers, _UNSET),
-                disable_resources=self._get_with_precedence(disable_resources, self.disable_resources, _UNSET),
-                wait_selector=self._get_with_precedence(wait_selector, self.wait_selector, _UNSET),
-                wait_selector_state=self._get_with_precedence(wait_selector_state, self.wait_selector_state, _UNSET),
-                network_idle=self._get_with_precedence(network_idle, self.network_idle, _UNSET),
-                load_dom=self._get_with_precedence(load_dom, self.load_dom, _UNSET),
-                solve_cloudflare=self._get_with_precedence(solve_cloudflare, self.solve_cloudflare, _UNSET),
-                selector_config=self._get_with_precedence(selector_config, self.selector_config, _UNSET),
-            ),
-            CamoufoxConfig,
+        params = _validate(
+            [
+                ("google_search", google_search, self.google_search),
+                ("timeout", timeout, self.timeout),
+                ("wait", wait, self.wait),
+                ("page_action", page_action, self.page_action),
+                ("extra_headers", extra_headers, self.extra_headers),
+                ("disable_resources", disable_resources, self.disable_resources),
+                ("wait_selector", wait_selector, self.wait_selector),
+                ("wait_selector_state", wait_selector_state, self.wait_selector_state),
+                ("network_idle", network_idle, self.network_idle),
+                ("load_dom", load_dom, self.load_dom),
+                ("solve_cloudflare", solve_cloudflare, self.solve_cloudflare),
+                ("selector_config", selector_config, self.selector_config),
+            ],
+            _UNSET,
        )
 
        if self._closed:  # pragma: no cover
@@ -672,8 +701,9 @@ class AsyncStealthySession(StealthySessionMixin, AsyncSession):
             page_info.page, first_response, final_response, params.selector_config
         )
 
-        # Mark the page as finished for next use
-        page_info.mark_finished()
+        # Close the page, to free up resources
+        await page_info.page.close()
+        self.page_pool.pages.remove(page_info)
 
         return response
 
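Two behavioral shifts above are worth calling out: `_get_page_content()` retries `page.content()` until Playwright stops raising its transient navigation error (the issue linked in the docstring), and a page is now closed and removed from the pool as soon as its response is built, instead of being parked in a "finished" state for reuse. A usage sketch of the session API these classes expose (argument names taken from this diff; treat the exact call shape as an assumption, not a documented contract):

```python
from scrapling.fetchers import StealthySession

# Each fetch() gets a fresh page that is closed once the Response is
# built, so long-lived sessions no longer accumulate idle pages.
with StealthySession(max_pages=2, solve_cloudflare=True) as session:
    page = session.fetch("https://example.com")  # solve_cloudflare bumps timeout to >= 60s
    print(page.status)
```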
scrapling/engines/_browsers/_controllers.py CHANGED
@@ -11,14 +11,12 @@ from playwright.async_api import (
     Playwright as AsyncPlaywright,
     Locator as AsyncLocator,
 )
-from rebrowser_playwright.sync_api import sync_playwright as sync_rebrowser_playwright
-from rebrowser_playwright.async_api import (
-    async_playwright as async_rebrowser_playwright,
-)
+from patchright.sync_api import sync_playwright as sync_patchright
+from patchright.async_api import async_playwright as async_patchright
 
 from scrapling.core.utils import log
 from ._base import SyncSession, AsyncSession, DynamicSessionMixin
-from ._validators import validate, PlaywrightConfig
+from ._validators import validate_fetch as _validate
 from scrapling.core._types import (
     Dict,
     List,
@@ -154,10 +152,7 @@ class DynamicSession(DynamicSessionMixin, SyncSession):
 
     def __create__(self):
         """Create a browser for this instance and context."""
-        sync_context = sync_rebrowser_playwright
-        if not self.stealth or self.real_chrome:
-            # Because rebrowser_playwright doesn't play well with real browsers
-            sync_context = sync_playwright
+        sync_context = sync_patchright if self.stealth else sync_playwright
 
         self.playwright: Playwright = sync_context().start()
 
@@ -229,22 +224,21 @@ class DynamicSession(DynamicSessionMixin, SyncSession):
         :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
         :return: A `Response` object.
         """
-        # Validate all resolved parameters
-        params = validate(
-            dict(
-                google_search=self._get_with_precedence(google_search, self.google_search, _UNSET),
-                timeout=self._get_with_precedence(timeout, self.timeout, _UNSET),
-                wait=self._get_with_precedence(wait, self.wait, _UNSET),
-                page_action=self._get_with_precedence(page_action, self.page_action, _UNSET),
-                extra_headers=self._get_with_precedence(extra_headers, self.extra_headers, _UNSET),
-                disable_resources=self._get_with_precedence(disable_resources, self.disable_resources, _UNSET),
-                wait_selector=self._get_with_precedence(wait_selector, self.wait_selector, _UNSET),
-                wait_selector_state=self._get_with_precedence(wait_selector_state, self.wait_selector_state, _UNSET),
-                network_idle=self._get_with_precedence(network_idle, self.network_idle, _UNSET),
-                load_dom=self._get_with_precedence(load_dom, self.load_dom, _UNSET),
-                selector_config=self._get_with_precedence(selector_config, self.selector_config, _UNSET),
-            ),
-            PlaywrightConfig,
+        params = _validate(
+            [
+                ("google_search", google_search, self.google_search),
+                ("timeout", timeout, self.timeout),
+                ("wait", wait, self.wait),
+                ("page_action", page_action, self.page_action),
+                ("extra_headers", extra_headers, self.extra_headers),
+                ("disable_resources", disable_resources, self.disable_resources),
+                ("wait_selector", wait_selector, self.wait_selector),
+                ("wait_selector_state", wait_selector_state, self.wait_selector_state),
+                ("network_idle", network_idle, self.network_idle),
+                ("load_dom", load_dom, self.load_dom),
+                ("selector_config", selector_config, self.selector_config),
+            ],
+            _UNSET,
         )
 
         if self._closed:  # pragma: no cover
@@ -305,8 +299,9 @@ class DynamicSession(DynamicSessionMixin, SyncSession):
             page_info.page, first_response, final_response, params.selector_config
         )
 
-        # Mark the page as finished for next use
-        page_info.mark_finished()
+        # Close the page, to free up resources
+        page_info.page.close()
+        self.page_pool.pages.remove(page_info)
 
         return response
 
@@ -402,10 +397,7 @@ class AsyncDynamicSession(DynamicSessionMixin, AsyncSession):
 
     async def __create__(self):
         """Create a browser for this instance and context."""
-        async_context = async_rebrowser_playwright
-        if not self.stealth or self.real_chrome:
-            # Because rebrowser_playwright doesn't play well with real browsers
-            async_context = async_playwright
+        async_context = async_patchright if self.stealth else async_playwright
 
         self.playwright: AsyncPlaywright = await async_context().start()
 
@@ -478,22 +470,21 @@ class AsyncDynamicSession(DynamicSessionMixin, AsyncSession):
         :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
         :return: A `Response` object.
         """
-        # Validate all resolved parameters
-        params = validate(
-            dict(
-                google_search=self._get_with_precedence(google_search, self.google_search, _UNSET),
-                timeout=self._get_with_precedence(timeout, self.timeout, _UNSET),
-                wait=self._get_with_precedence(wait, self.wait, _UNSET),
-                page_action=self._get_with_precedence(page_action, self.page_action, _UNSET),
-                extra_headers=self._get_with_precedence(extra_headers, self.extra_headers, _UNSET),
-                disable_resources=self._get_with_precedence(disable_resources, self.disable_resources, _UNSET),
-                wait_selector=self._get_with_precedence(wait_selector, self.wait_selector, _UNSET),
-                wait_selector_state=self._get_with_precedence(wait_selector_state, self.wait_selector_state, _UNSET),
-                network_idle=self._get_with_precedence(network_idle, self.network_idle, _UNSET),
-                load_dom=self._get_with_precedence(load_dom, self.load_dom, _UNSET),
-                selector_config=self._get_with_precedence(selector_config, self.selector_config, _UNSET),
-            ),
-            PlaywrightConfig,
+        params = _validate(
+            [
+                ("google_search", google_search, self.google_search),
+                ("timeout", timeout, self.timeout),
+                ("wait", wait, self.wait),
+                ("page_action", page_action, self.page_action),
+                ("extra_headers", extra_headers, self.extra_headers),
+                ("disable_resources", disable_resources, self.disable_resources),
+                ("wait_selector", wait_selector, self.wait_selector),
+                ("wait_selector_state", wait_selector_state, self.wait_selector_state),
+                ("network_idle", network_idle, self.network_idle),
+                ("load_dom", load_dom, self.load_dom),
+                ("selector_config", selector_config, self.selector_config),
+            ],
+            _UNSET,
        )
 
        if self._closed:  # pragma: no cover
@@ -554,9 +545,9 @@ class AsyncDynamicSession(DynamicSessionMixin, AsyncSession):
             page_info.page, first_response, final_response, params.selector_config
         )
 
-            # Mark the page as finished for next use
-            page_info.mark_finished()
-
+            # Close the page, to free up resources
+            await page_info.page.close()
+            self.page_pool.pages.remove(page_info)
             return response
 
         except Exception as e:  # pragma: no cover
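All four `fetch()` implementations above now funnel their arguments through `validate_fetch` as `(name, request_value, session_value)` triples with `_UNSET` as the sentinel, replacing a dozen near-identical `_get_with_precedence` calls. The resolution rule in isolation (hypothetical `resolve` helper; the real function appears in the `_validators.py` section below):

```python
_UNSET = object()  # sentinel meaning "the caller did not pass this argument"


def resolve(params):
    """Pick the request-level value unless it is the sentinel, else the session default."""
    return {
        name: request if request is not _UNSET else session
        for name, request, session in params
    }


resolved = resolve([("timeout", _UNSET, 30000), ("wait", 5, 0)])
print(resolved)  # {'timeout': 30000, 'wait': 5} -> only "wait" was overridden
```

Using `is not` against a private sentinel (rather than a `None` check) lets callers explicitly pass `None`, `0`, or `False` as real override values.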
scrapling/engines/_browsers/_page.py CHANGED
@@ -6,7 +6,7 @@ from playwright.async_api import Page as AsyncPage
 
 from scrapling.core._types import Optional, List, Literal
 
-PageState = Literal["finished", "ready", "busy", "error"]  # States that a page can be in
+PageState = Literal["ready", "busy", "error"]  # States that a page can be in
 
 
 @dataclass
@@ -23,11 +23,6 @@ class PageInfo:
         self.state = "busy"
         self.url = url
 
-    def mark_finished(self):
-        """Mark the page as finished for new requests"""
-        self.state = "finished"
-        self.url = ""
-
     def mark_error(self):
         """Mark the page as having an error"""
         self.state = "error"
@@ -67,12 +62,6 @@ class PagePool:
         """Get the total number of pages"""
         return len(self.pages)
 
-    @property
-    def finished_count(self) -> int:
-        """Get the number of finished pages"""
-        with self._lock:
-            return sum(1 for p in self.pages if p.state == "finished")
-
     @property
     def busy_count(self) -> int:
         """Get the number of busy pages"""
@@ -83,33 +72,3 @@ class PagePool:
         """Remove pages in error state"""
         with self._lock:
             self.pages = [p for p in self.pages if p.state != "error"]
-
-    def close_all_finished_pages(self):
-        """Close all pages in finished state and remove them from the pool"""
-        with self._lock:
-            pages_to_remove = []
-            for page_info in self.pages:
-                if page_info.state == "finished":
-                    try:
-                        page_info.page.close()
-                    except Exception:
-                        pass
-                    pages_to_remove.append(page_info)
-
-            for page_info in pages_to_remove:
-                self.pages.remove(page_info)
-
-    async def aclose_all_finished_pages(self):
-        """Async version: Close all pages in finished state and remove them from the pool"""
-        with self._lock:
-            pages_to_remove = []
-            for page_info in self.pages:
-                if page_info.state == "finished":
-                    try:
-                        await page_info.page.close()
-                    except Exception:
-                        pass
-                    pages_to_remove.append(page_info)
-
-            for page_info in pages_to_remove:
-                self.pages.remove(page_info)
scrapling/engines/_browsers/_validators.py CHANGED
@@ -1,21 +1,69 @@
-from msgspec import Struct, convert, ValidationError
-from urllib.parse import urlparse
 from pathlib import Path
+from typing import Annotated
+from dataclasses import dataclass
+from urllib.parse import urlparse
+
+from msgspec import Struct, Meta, convert, ValidationError
 
 from scrapling.core._types import (
-    Optional,
     Dict,
-    Callable,
     List,
+    Tuple,
+    Optional,
+    Callable,
     SelectorWaitStates,
 )
 from scrapling.engines.toolbelt.navigation import construct_proxy_dict
 
 
+# Custom validators for msgspec
+def _validate_file_path(value: str):
+    """Fast file path validation"""
+    path = Path(value)
+    if not path.exists():
+        raise ValueError(f"Init script path not found: {value}")
+    if not path.is_file():
+        raise ValueError(f"Init script is not a file: {value}")
+    if not path.is_absolute():
+        raise ValueError(f"Init script is not a absolute path: {value}")
+
+
+def _validate_addon_path(value: str):
+    """Fast addon path validation"""
+    path = Path(value)
+    if not path.exists():
+        raise FileNotFoundError(f"Addon path not found: {value}")
+    if not path.is_dir():
+        raise ValueError(f"Addon path must be a directory of the extracted addon: {value}")
+
+
+def _validate_cdp_url(cdp_url: str):
+    """Fast CDP URL validation"""
+    try:
+        # Check the scheme
+        if not cdp_url.startswith(("ws://", "wss://")):
+            raise ValueError("CDP URL must use 'ws://' or 'wss://' scheme")
+
+        # Validate hostname and port
+        if not urlparse(cdp_url).netloc:
+            raise ValueError("Invalid hostname for the CDP URL")
+
+    except AttributeError as e:
+        raise ValueError(f"Malformed CDP URL: {cdp_url}: {str(e)}")
+
+    except Exception as e:
+        raise ValueError(f"Invalid CDP URL '{cdp_url}': {str(e)}")
+
+
+# Type aliases for cleaner annotations
+PagesCount = Annotated[int, Meta(ge=1, le=50)]
+Seconds = Annotated[int, float, Meta(ge=0)]
+
+
 class PlaywrightConfig(Struct, kw_only=True, frozen=False):
     """Configuration struct for validation"""
 
-    max_pages: int = 1
+    max_pages: PagesCount = 1
     cdp_url: Optional[str] = None
     headless: bool = True
     google_search: bool = True
@@ -23,13 +71,13 @@ class PlaywrightConfig(Struct, kw_only=True, frozen=False):
     disable_webgl: bool = False
     real_chrome: bool = False
     stealth: bool = False
-    wait: int | float = 0
+    wait: Seconds = 0
     page_action: Optional[Callable] = None
     proxy: Optional[str | Dict[str, str]] = None  # The default value for proxy in Playwright's source is `None`
     locale: str = "en-US"
     extra_headers: Optional[Dict[str, str]] = None
     useragent: Optional[str] = None
-    timeout: int | float = 30000
+    timeout: Seconds = 30000
     init_script: Optional[str] = None
     disable_resources: bool = False
     wait_selector: Optional[str] = None
@@ -41,52 +89,26 @@ class PlaywrightConfig(Struct, kw_only=True, frozen=False):
 
     def __post_init__(self):
         """Custom validation after msgspec validation"""
-        if self.max_pages < 1 or self.max_pages > 50:
-            raise ValueError("max_pages must be between 1 and 50")
-        if self.timeout < 0:
-            raise ValueError("timeout must be >= 0")
         if self.page_action and not callable(self.page_action):
             raise TypeError(f"page_action must be callable, got {type(self.page_action).__name__}")
         if self.proxy:
             self.proxy = construct_proxy_dict(self.proxy, as_tuple=True)
         if self.cdp_url:
-            self.__validate_cdp(self.cdp_url)
+            _validate_cdp_url(self.cdp_url)
+
         if not self.cookies:
             self.cookies = []
         if not self.selector_config:
             self.selector_config = {}
 
         if self.init_script is not None:
-            script_path = Path(self.init_script)
-            if not script_path.exists():
-                raise ValueError("Init script path not found")
-            elif not script_path.is_file():
-                raise ValueError("Init script is not a file")
-            elif not script_path.is_absolute():
-                raise ValueError("Init script is not a absolute path")
-
-    @staticmethod
-    def __validate_cdp(cdp_url):
-        try:
-            # Check the scheme
-            if not cdp_url.startswith(("ws://", "wss://")):
-                raise ValueError("CDP URL must use 'ws://' or 'wss://' scheme")
-
-            # Validate hostname and port
-            if not urlparse(cdp_url).netloc:
-                raise ValueError("Invalid hostname for the CDP URL")
-
-        except AttributeError as e:
-            raise ValueError(f"Malformed CDP URL: {cdp_url}: {str(e)}")
-
-        except Exception as e:
-            raise ValueError(f"Invalid CDP URL '{cdp_url}': {str(e)}")
+            _validate_file_path(self.init_script)
 
 
 class CamoufoxConfig(Struct, kw_only=True, frozen=False):
     """Configuration struct for validation"""
 
-    max_pages: int = 1
+    max_pages: PagesCount = 1
     headless: bool = True  # noqa: F821
     block_images: bool = False
     disable_resources: bool = False
@@ -96,8 +118,8 @@ class CamoufoxConfig(Struct, kw_only=True, frozen=False):
     load_dom: bool = True
     humanize: bool | float = True
     solve_cloudflare: bool = False
-    wait: int | float = 0
-    timeout: int | float = 30000
+    wait: Seconds = 0
+    timeout: Seconds = 30000
     init_script: Optional[str] = None
     page_action: Optional[Callable] = None
     wait_selector: Optional[str] = None
@@ -115,38 +137,23 @@ class CamoufoxConfig(Struct, kw_only=True, frozen=False):
 
     def __post_init__(self):
         """Custom validation after msgspec validation"""
-        if self.max_pages < 1 or self.max_pages > 50:
-            raise ValueError("max_pages must be between 1 and 50")
-        if self.timeout < 0:
-            raise ValueError("timeout must be >= 0")
         if self.page_action and not callable(self.page_action):
             raise TypeError(f"page_action must be callable, got {type(self.page_action).__name__}")
         if self.proxy:
             self.proxy = construct_proxy_dict(self.proxy, as_tuple=True)
 
-        if not self.addons:
-            self.addons = []
-        else:
+        if self.addons and isinstance(self.addons, list):
             for addon in self.addons:
-                addon_path = Path(addon)
-                if not addon_path.exists():
-                    raise FileNotFoundError(f"Addon's path not found: {addon}")
-                elif not addon_path.is_dir():
-                    raise ValueError(
-                        f"Addon's path is not a folder, you need to pass a folder of the extracted addon: {addon}"
-                    )
+                _validate_addon_path(addon)
+        else:
+            self.addons = []
 
         if self.init_script is not None:
-            script_path = Path(self.init_script)
-            if not script_path.exists():
-                raise ValueError("Init script path not found")
-            elif not script_path.is_file():
-                raise ValueError("Init script is not a file")
-            elif not script_path.is_absolute():
-                raise ValueError("Init script is not a absolute path")
+            _validate_file_path(self.init_script)
 
         if not self.cookies:
             self.cookies = []
+        # Cloudflare timeout adjustment
         if self.solve_cloudflare and self.timeout < 60_000:
             self.timeout = 60_000
         if not self.selector_config:
@@ -155,10 +162,68 @@ class CamoufoxConfig(Struct, kw_only=True, frozen=False):
             self.additional_args = {}
 
 
-def validate(params, model):
+# Code parts to validate `fetch` in the least possible numbers of lines overall
+class FetchConfig(Struct, kw_only=True):
+    """Configuration struct for `fetch` calls validation"""
+
+    google_search: bool = True
+    timeout: Seconds = 30000
+    wait: Seconds = 0
+    page_action: Optional[Callable] = None
+    extra_headers: Optional[Dict[str, str]] = None
+    disable_resources: bool = False
+    wait_selector: Optional[str] = None
+    wait_selector_state: SelectorWaitStates = "attached"
+    network_idle: bool = False
+    load_dom: bool = True
+    solve_cloudflare: bool = False
+    selector_config: Optional[Dict] = {}
+
+    def to_dict(self):
+        return {f: getattr(self, f) for f in self.__struct_fields__}
+
+
+@dataclass
+class _fetch_params:
+    """A dataclass of all parameters used by `fetch` calls"""
+
+    google_search: bool
+    timeout: Seconds
+    wait: Seconds
+    page_action: Optional[Callable]
+    extra_headers: Optional[Dict[str, str]]
+    disable_resources: bool
+    wait_selector: Optional[str]
+    wait_selector_state: SelectorWaitStates
+    network_idle: bool
+    load_dom: bool
+    solve_cloudflare: bool
+    selector_config: Optional[Dict]
+
+
+def validate_fetch(params: List[Tuple], sentinel=None) -> _fetch_params:
+    result = {}
+    overrides = {}
+
+    for arg, request_value, session_value in params:
+        if request_value is not sentinel:
+            overrides[arg] = request_value
+        else:
+            result[arg] = session_value
+
+    if overrides:
+        overrides = validate(overrides, FetchConfig).to_dict()
+        overrides.update(result)
+        return _fetch_params(**overrides)
+
+    if not result.get("solve_cloudflare"):
+        result["solve_cloudflare"] = False
+
+    return _fetch_params(**result)
+
+
+def validate(params: Dict, model) -> PlaywrightConfig | CamoufoxConfig | FetchConfig:
     try:
-        config = convert(params, model)
+        return convert(params, model)
     except ValidationError as e:
-        raise TypeError(f"Invalid argument type: {e}")
-
-    return config
+        raise TypeError(f"Invalid argument type: {e}") from e
scrapling/engines/static.py CHANGED
@@ -94,8 +94,8 @@ class FetcherSession:
         self.default_http3 = http3
         self.selector_config = selector_config or {}
 
-        self._curl_session: Optional[CurlSession] = None
-        self._async_curl_session: Optional[AsyncCurlSession] = None
+        self._curl_session: Optional[CurlSession] | bool = None
+        self._async_curl_session: Optional[AsyncCurlSession] | bool = None
 
     def _merge_request_args(self, **kwargs) -> Dict[str, Any]:
         """Merge request-specific arguments with default session arguments."""
@@ -239,7 +239,6 @@ class FetcherSession:
         Perform an HTTP request using the configured session.
 
         :param method: HTTP method to be used, supported methods are ["GET", "POST", "PUT", "DELETE"]
-        :param url: Target URL for the request.
         :param request_args: Arguments to be passed to the session's `request()` method.
         :param max_retries: Maximum number of retries for the request.
         :param retry_delay: Number of seconds to wait between retries.
@@ -280,7 +279,6 @@ class FetcherSession:
         Perform an HTTP request using the configured session.
 
         :param method: HTTP method to be used, supported methods are ["GET", "POST", "PUT", "DELETE"]
-        :param url: Target URL for the request.
         :param request_args: Arguments to be passed to the session's `request()` method.
         :param max_retries: Maximum number of retries for the request.
         :param retry_delay: Number of seconds to wait between retries.
scrapling/engines/toolbelt/navigation.py CHANGED
@@ -4,7 +4,7 @@ Functions related to files and URLs
 
 from pathlib import Path
 from functools import lru_cache
-from urllib.parse import urlencode, urlparse
+from urllib.parse import urlparse
 
 from playwright.async_api import Route as async_Route
 from msgspec import Struct, structs, convert, ValidationError
scrapling/parser.py CHANGED
@@ -239,7 +239,7 @@ class Selector(SelectorsGeneration):
         )
 
     def __handle_element(
-        self, element: HtmlElement | _ElementUnicodeResult
+        self, element: Optional[HtmlElement | _ElementUnicodeResult]
     ) -> Optional[Union[TextHandler, "Selector"]]:
         """Used internally in all functions to convert a single element to type (Selector|TextHandler) when possible"""
         if element is None:
@@ -339,24 +339,28 @@ class Selector(SelectorsGeneration):
     @property
     def html_content(self) -> TextHandler:
         """Return the inner HTML code of the element"""
-        return TextHandler(tostring(self._root, encoding=self.encoding, method="html", with_tail=False))
+        content = tostring(self._root, encoding=self.encoding, method="html", with_tail=False)
+        if isinstance(content, bytes):
+            content = content.decode("utf-8")
+        return TextHandler(content)
 
     @property
-    def body(self):
+    def body(self) -> str | bytes:
         """Return the raw body of the current `Selector` without any processing. Useful for binary and non-HTML requests."""
         return self._raw_body
 
     def prettify(self) -> TextHandler:
         """Return a prettified version of the element's inner html-code"""
-        return TextHandler(
-            tostring(
-                self._root,
-                encoding=self.encoding,
-                pretty_print=True,
-                method="html",
-                with_tail=False,
-            )
+        content = tostring(
+            self._root,
+            encoding=self.encoding,
+            pretty_print=True,
+            method="html",
+            with_tail=False,
         )
+        if isinstance(content, bytes):
+            content = content.decode("utf-8")
+        return TextHandler(content)
 
     def has_class(self, class_name: str) -> bool:
         """Check if the element has a specific class
@@ -1255,7 +1259,7 @@ class Selectors(List[Selector]):
         :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
         :param case_sensitive: if disabled, the function will set the regex to ignore the letters case while compiling it
         """
-        results = [n.text.re(regex, replace_entities, clean_match, case_sensitive) for n in self]
+        results = [n.re(regex, replace_entities, clean_match, case_sensitive) for n in self]
         return TextHandlers(flatten(results))
 
     def re_first(
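The `html_content` and `prettify` changes guard against a quirk of `lxml.etree.tostring`: with a concrete `encoding` argument it returns `bytes`, and only `encoding="unicode"` yields `str`, so the result must be decoded before being wrapped in the `str` subclass `TextHandler`. A quick demonstration with plain lxml:

```python
from lxml.etree import tostring
from lxml.html import fromstring

root = fromstring("<p>café</p>")
print(type(tostring(root, encoding="utf-8", method="html")))    # <class 'bytes'>
print(type(tostring(root, encoding="unicode", method="html")))  # <class 'str'>
```

The `Selectors.re` fix in the last hunk delegates to each element's own `Selector.re` instead of regex-matching only the node's direct `.text`, keeping list-level `.re()` consistent with element-level `.re()`.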
{scrapling-0.3.3.dist-info → scrapling-0.3.5.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: scrapling
-Version: 0.3.3
+Version: 0.3.5
 Summary: Scrapling is an undetectable, powerful, flexible, high-performance Python library that makes Web Scraping easy and effortless as it should be!
 Home-page: https://github.com/D4Vinci/Scrapling
 Author: Karim Shoair
@@ -69,15 +69,15 @@ Requires-Dist: cssselect>=1.3.0
 Requires-Dist: orjson>=3.11.3
 Requires-Dist: tldextract>=5.3.0
 Provides-Extra: fetchers
-Requires-Dist: click>=8.2.1; extra == "fetchers"
+Requires-Dist: click>=8.3.0; extra == "fetchers"
 Requires-Dist: curl_cffi>=0.13.0; extra == "fetchers"
-Requires-Dist: playwright>=1.52.0; extra == "fetchers"
-Requires-Dist: rebrowser-playwright>=1.52.0; extra == "fetchers"
+Requires-Dist: playwright>=1.55.0; extra == "fetchers"
+Requires-Dist: patchright>=1.55.2; extra == "fetchers"
 Requires-Dist: camoufox>=0.4.11; extra == "fetchers"
 Requires-Dist: geoip2>=5.1.0; extra == "fetchers"
 Requires-Dist: msgspec>=0.19.0; extra == "fetchers"
 Provides-Extra: ai
-Requires-Dist: mcp>=1.14.0; extra == "ai"
+Requires-Dist: mcp>=1.14.1; extra == "ai"
 Requires-Dist: markdownify>=1.2.0; extra == "ai"
 Requires-Dist: scrapling[fetchers]; extra == "ai"
 Provides-Extra: shell
@@ -114,14 +114,6 @@ Dynamic: license-file
 </p>
 
 <p align="center">
-    <a href="https://scrapling.readthedocs.io/en/latest/#installation">
-        Installation
-    </a>
-    ·
-    <a href="https://scrapling.readthedocs.io/en/latest/overview/">
-        Overview
-    </a>
-    ·
     <a href="https://scrapling.readthedocs.io/en/latest/parsing/selection/">
         Selection methods
     </a>
@@ -130,6 +122,14 @@ Dynamic: license-file
         Choosing a fetcher
     </a>
     ·
+    <a href="https://scrapling.readthedocs.io/en/latest/cli/overview/">
+        CLI
+    </a>
+    ·
+    <a href="https://scrapling.readthedocs.io/en/latest/ai/mcp-server/">
+        MCP mode
+    </a>
+    ·
     <a href="https://scrapling.readthedocs.io/en/latest/tutorials/migrating_from_beautifulsoup/">
         Migrating from Beautifulsoup
     </a>
@@ -157,11 +157,13 @@ Built for the modern Web, Scrapling has its own rapid parsing engine and its fet
 
 <!-- sponsors -->
 
+<a href="https://www.thordata.com/?ls=github&lk=D4Vinci" target="_blank" title="A global network of over 60M+ residential proxies with 99.7% availability, ensuring stable and reliable web data scraping to support AI, BI, and workflows."><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/thordata.jpg"></a>
 <a href="https://evomi.com?utm_source=github&utm_medium=banner&utm_campaign=d4vinci-scrapling" target="_blank" title="Evomi is your Swiss Quality Proxy Provider, starting at $0.49/GB"><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/evomi.png"></a>
+<a href="https://visit.decodo.com/Dy6W0b" target="_blank" title="Try the Most Efficient Residential Proxies for Free"><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/decodo.png"></a>
 <a href="https://petrosky.io/d4vinci" target="_blank" title="PetroSky delivers cutting-edge VPS hosting."><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/petrosky.png"></a>
 <a href="https://www.swiftproxy.net/" target="_blank" title="Unlock Reliable Proxy Services with Swiftproxy!"><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/swiftproxy.png"></a>
-<a href="https://serpapi.com/?utm_source=scrapling" target="_blank" title="Scrape Google and other search engines with SerpApi"><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/SerpApi.png"></a>
 <a href="https://www.nstproxy.com/?type=flow&utm_source=scrapling" target="_blank" title="One Proxy Service, Infinite Solutions at Unbeatable Prices!"><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/NSTproxy.png"></a>
+<a href="https://serpapi.com/?utm_source=scrapling" target="_blank" title="Scrape Google and other search engines with SerpApi"><img src="https://raw.githubusercontent.com/D4Vinci/Scrapling/main/images/SerpApi.png"></a>
 
 <!-- /sponsors -->
 
@@ -410,10 +412,9 @@ This project includes code adapted from:
 ## Thanks and References
 
 - [Daijro](https://github.com/daijro)'s brilliant work on [BrowserForge](https://github.com/daijro/browserforge) and [Camoufox](https://github.com/daijro/camoufox)
-- [Vinyzu](https://github.com/Vinyzu)'s work on [Botright](https://github.com/Vinyzu/Botright)
+- [Vinyzu](https://github.com/Vinyzu)'s brilliant work on [Botright](https://github.com/Vinyzu/Botright) and [PatchRight](https://github.com/Kaliiiiiiiiii-Vinyzu/patchright)
 - [brotector](https://github.com/kaliiiiiiiiii/brotector) for browser detection bypass techniques
-- [fakebrowser](https://github.com/kkoooqq/fakebrowser) for fingerprinting research
-- [rebrowser-patches](https://github.com/rebrowser/rebrowser-patches) for stealth improvements
+- [fakebrowser](https://github.com/kkoooqq/fakebrowser) and [BotBrowser](https://github.com/botswin/BotBrowser) for fingerprinting research
 
 ---
 <div align="center"><small>Designed & crafted with ❤️ by Karim Shoair.</small></div><br>
{scrapling-0.3.3.dist-info → scrapling-0.3.5.dist-info}/RECORD RENAMED
@@ -1,15 +1,15 @@
-scrapling/__init__.py,sha256=c1t8r6IGEXC-PhNeFxFtoqNsiSv7B_9f_XBn52EWESg,1236
-scrapling/cli.py,sha256=ooObP0VoYGxnskEJB6xFp23NREI_XDPJpsMSr9Sv8nk,26355
+scrapling/__init__.py,sha256=3-wjeMR5IQVhHoPcl5KYMo3cgA00q1mWn38q02xTWck,1236
+scrapling/cli.py,sha256=tGQ3q4pHJZf1XJ8UIqPdT2JR9bjOhlXydmY1cNLkbZc,26363
 scrapling/fetchers.py,sha256=aYQUxp-0i-OBucdpdG6zjWCafTCgpXJdnJ0GIrm5GfA,26523
-scrapling/parser.py,sha256=aJRqfuOxBHrM_Co9XHeuL6qYHgQTyi7zD1DoCA3mROY,57321
+scrapling/parser.py,sha256=Fh15nediLLSfYQOb_vr76YFUA_fNJFU7klYCkp_XXts,57517
 scrapling/py.typed,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2
 scrapling/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scrapling/core/_html_utils.py,sha256=ki47v54SsTL5-khi1jcLkJqAHqEq19cuex-dqzXdbEI,20328
 scrapling/core/_types.py,sha256=iXhi8LFkU4wjkGOjITdY1IDBEbn5rOxsl7xwEKT1L3I,895
 scrapling/core/ai.py,sha256=v3wjtXJgBRUtImE6Q_Bf_FruOArJyraQk4kqsqhlU8k,35474
-scrapling/core/custom_types.py,sha256=3kLrNDVm1vP3IziyyNjFVVQO_2bacwvm3hiK7h3gWjU,13634
+scrapling/core/custom_types.py,sha256=GlQZiVIMCyv8vOdDUlASPn85r_4nw0P9ggID9q1VkRA,13608
 scrapling/core/mixins.py,sha256=2iUVcN2XSAKGEvNmAM2Rr9axpZoxu0M2gIFEaFTO_Dg,3206
-scrapling/core/shell.py,sha256=Um_CukPuX8K1VgEZsu_cXdsnoJTC_lnv4co2DBZXICU,21956
+scrapling/core/shell.py,sha256=dCD8c_k1skXrKSIc_Qe_KgsiMOAS_1eCzgWjvSO74-I,22893
 scrapling/core/storage.py,sha256=8lWMPut6lPpvn9iOkgy9ao11_g8FNkXq67wHKtU4uuM,6290
 scrapling/core/translator.py,sha256=HLJngeRRw2M0eNe_f8AfQD64a49OECIEm5Df_WELVG4,5135
 scrapling/core/utils/__init__.py,sha256=7B14TcrDVwSaH6BQrMnzb1NtFa4Om237dJcF9oe-lM0,204
@@ -17,28 +17,28 @@ scrapling/core/utils/_shell.py,sha256=zes71MmFTs7V9f0JFstaWcjQhKNZN6xvspu29YVQtR
 scrapling/core/utils/_utils.py,sha256=ATy-wwz00U-alOGH-NGK-VoPNr1qYmUwEoWuqAHjDkg,3143
 scrapling/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scrapling/engines/constants.py,sha256=DP7yVRK1w1W4B1eXGeeKKQNDSo163FFXdPaxTy4adqw,4088
-scrapling/engines/static.py,sha256=779pVJvCCjFSCKGN7a_9rAH55oO2SyO88cnVrE2DQy8,33204
+scrapling/engines/static.py,sha256=Tuwl6cEDP5_OQKPFRmemz7ozdeZnWm0vir4J7JYdQCs,33122
 scrapling/engines/_browsers/__init__.py,sha256=lu5RgcV4zYacRaKm28ph5TzjqAovTaQNNfXSgQGwDOU,123
-scrapling/engines/_browsers/_base.py,sha256=vX75atq2QpsiCCpT4P2W4HQqs3P2RZWYHvDevh271kQ,12238
-scrapling/engines/_browsers/_camoufox.py,sha256=RHQMlSrn4PKg0p53b5r7aGzWxacTjUZWlPOdRPDOvls,34890
+scrapling/engines/_browsers/_base.py,sha256=29rPeXyrRnFIPLLMbvq3CUxGw4sMEJ3nKki9CC1iH2g,11049
+scrapling/engines/_browsers/_camoufox.py,sha256=BvxsTLcDpTMVoqsHIy7Smwls1zo6fpCtGMDW4v5Kim8,35356
 scrapling/engines/_browsers/_config_tools.py,sha256=mEPA5SGrWq0dl15cDOT6sOsm5NHMD0vI0fuPttGpw-U,4610
-scrapling/engines/_browsers/_controllers.py,sha256=GlYGt_LBTDjrWpD1zgKyyy9mynlEPR9MfnXJFUSDw2s,28246
-scrapling/engines/_browsers/_page.py,sha256=ixwI5d-AIzfUGekRSCbPLJAckf673B7QCyaWO-xJa84,3688
-scrapling/engines/_browsers/_validators.py,sha256=knkGvgpGeqtOWx4Us3pln1o4mJXfG4M-SWII080I9AE,6117
+scrapling/engines/_browsers/_controllers.py,sha256=YuiO8uw8pyv8hQLBvZCJcTGrNbKZSsYzkPKK9X6bq6U,27232
+scrapling/engines/_browsers/_page.py,sha256=1z-P6c97cTkULE-FVrsMY589e6eL_20Ae8pUe6vjggE,2206
+scrapling/engines/_browsers/_validators.py,sha256=jvJjXURN79aeR-ZFc_k5zf_3ClP18gM1qZA7dMXd_YI,7491
 scrapling/engines/toolbelt/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 scrapling/engines/toolbelt/convertor.py,sha256=e_rMcW8ScdfxKO-V5Mk61blVzwuDgd82CpRds0Z2tMQ,13102
 scrapling/engines/toolbelt/custom.py,sha256=uhMXa_LNcvvG3wZXBRKHXvqLqShMR9SHwc3bBv4UaQs,7664
 scrapling/engines/toolbelt/fingerprints.py,sha256=hCxKUTwo8sy7iN9wk8OA5vGo9XOn6E365zvC1C6zWDE,2212
-scrapling/engines/toolbelt/navigation.py,sha256=CWvM0KDuLPzvM6T8_yBq05nmB4scXshoEKVbhR4zEBk,3561
+scrapling/engines/toolbelt/navigation.py,sha256=Ej23I1n9AjCwxva_yRXUQeefmYJgi7lgb2Wr_b8RNFs,3550
 scrapling/engines/toolbelt/bypasses/navigator_plugins.js,sha256=tbnnk3nCXB6QEQnOhDlu3n-s7lnUTAkrUsjP6FDQIQg,2104
 scrapling/engines/toolbelt/bypasses/notification_permission.js,sha256=poPM3o5WYgEX-EdiUfDCllpWfc3Umvw4jr2u6O6elus,237
 scrapling/engines/toolbelt/bypasses/playwright_fingerprint.js,sha256=clzuf7KYcvDWYaKKxT_bkAoCT2fGsOcUw47948CHjAc,267
 scrapling/engines/toolbelt/bypasses/screen_props.js,sha256=fZEuHMQ1-fYuxxUMoQXUvVWYUkPUbblkfMfpiLvBY7w,599
 scrapling/engines/toolbelt/bypasses/webdriver_fully.js,sha256=hdJw4clRAJQqIdq5gIFC_eC-x7C1i2ab01KV5ylmOBs,728
 scrapling/engines/toolbelt/bypasses/window_chrome.js,sha256=D7hqzNGGDorh8JVlvm2YIv7Bk2CoVkG55MDIdyqhT1w,6808
-scrapling-0.3.3.dist-info/licenses/LICENSE,sha256=XHgu8DRuT7_g3Hb9Q18YGg8eShp6axPBacbnQxT_WWQ,1499
-scrapling-0.3.3.dist-info/METADATA,sha256=QXhVgzdtzq9U5kEpv8kWSkGD64EQBoZfmR5QRkfTV1I,21948
-scrapling-0.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-scrapling-0.3.3.dist-info/entry_points.txt,sha256=DHyt2Blxy0P5OE2HRcP95Wz9_xo2ERCDcNqrJjYS3o8,49
-scrapling-0.3.3.dist-info/top_level.txt,sha256=Ud-yF-PC2U5HQ3nc5QwT7HSPdIpF1RuwQ_mYgBzHHIM,10
-scrapling-0.3.3.dist-info/RECORD,,
+scrapling-0.3.5.dist-info/licenses/LICENSE,sha256=XHgu8DRuT7_g3Hb9Q18YGg8eShp6axPBacbnQxT_WWQ,1499
+scrapling-0.3.5.dist-info/METADATA,sha256=a-ZKBr0yH6jKb88l5BpbwMhWEbP-mQG3_NoI4Rogv9M,22513
+scrapling-0.3.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+scrapling-0.3.5.dist-info/entry_points.txt,sha256=DHyt2Blxy0P5OE2HRcP95Wz9_xo2ERCDcNqrJjYS3o8,49
+scrapling-0.3.5.dist-info/top_level.txt,sha256=Ud-yF-PC2U5HQ3nc5QwT7HSPdIpF1RuwQ_mYgBzHHIM,10
+scrapling-0.3.5.dist-info/RECORD,,