fraudcrawler 0.6.0.tar.gz → 0.6.1.tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of fraudcrawler might be problematic.
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/PKG-INFO +1 -1
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/base.py +8 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/launch_demo_pipeline.py +1 -1
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/scraping/search.py +169 -147
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/pyproject.toml +1 -1
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/LICENSE +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/README.md +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/__init__.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/__init__.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/client.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/google-languages.json +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/google-locations.json +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/orchestrator.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/base/retry.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/processing/__init__.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/processing/processor.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/scraping/__init__.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/scraping/enrich.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/scraping/url.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/scraping/zyte.py +0 -0
- {fraudcrawler-0.6.0 → fraudcrawler-0.6.1}/fraudcrawler/settings.py +0 -0
fraudcrawler/base/base.py

@@ -217,6 +217,14 @@ class DomainUtils:
         """
 
     _hostname_pattern = r"^(?:https?:\/\/)?([^\/:?#]+)"
+    _headers = {
+        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
+        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+        "Accept-Language": "en-US,en;q=0.5",
+        "Accept-Encoding": "gzip, deflate",
+        "Connection": "keep-alive",
+        "Upgrade-Insecure-Requests": "1",
+    }
 
     def _get_domain(self, url: str) -> str:
         """Extracts the second-level domain together with the top-level domain (e.g. `google.com`).
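As an aside, a minimal sketch (not part of the diff) of how a browser-like header set such as the _headers dict above is typically passed to httpx; sites that reject default client User-Agents are less likely to answer with 403. The URL in the commented call is only an example.

import httpx

# Headers copied from the _headers dict added above; trimmed for brevity.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
}


async def fetch(url: str) -> bytes:
    """Fetch a page while presenting browser-like headers."""
    async with httpx.AsyncClient() as client:
        response = await client.get(url, headers=HEADERS)
        response.raise_for_status()  # raises httpx.HTTPStatusError on 4xx/5xx
        return response.content

# e.g. asyncio.run(fetch("https://www.toppreise.ch/"))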
fraudcrawler/scraping/search.py

@@ -8,7 +8,7 @@ from urllib.parse import quote_plus
 from bs4 import BeautifulSoup
 from bs4.element import Tag
 import httpx
-from tenacity import AsyncRetrying, RetryCallState
+from tenacity import RetryCallState
 
 from fraudcrawler.settings import (
     SEARCH_DEFAULT_COUNTRY_CODES,
@@ -45,6 +45,14 @@ class SearchEngine(ABC, DomainUtils):
 
     _hostname_pattern = r"^(?:https?:\/\/)?([^\/:?#]+)"
 
+    def __init__(self, http_client: httpx.AsyncClient):
+        """Initializes the SearchEngine with the given HTTP client.
+
+        Args:
+            http_client: An httpx.AsyncClient to use for the async requests.
+        """
+        self._http_client = http_client
+
     @property
     @abstractmethod
     def _search_engine_name(self) -> str:
@@ -56,45 +64,81 @@ class SearchEngine(ABC, DomainUtils):
         """Apply the search with the given parameters and return results."""
         pass
 
+    def _create_search_result(self, url: str) -> SearchResult:
+        """From a given url it creates the class:`SearchResult` instance."""
+        # Get marketplace name
+        domain = self._get_domain(url=url)
+
+        # Create and return the SearchResult object
+        result = SearchResult(
+            url=url,
+            domain=domain,
+            search_engine_name=self._search_engine_name,
+        )
+        return result
+
     @classmethod
     def _log_before(
-        cls, search_string: str, retry_state: RetryCallState | None
+        cls, url: str, params: dict | None, retry_state: RetryCallState | None
     ) -> None:
-        """Context aware logging before the request is made."""
+        """Context aware logging before HTTP request is made."""
         if retry_state:
             logger.debug(
-                f'Performing
-                f"(attempt {retry_state.attempt_number})."
+                f'Performing HTTP request in {cls.__name__} to url="{url}" '
+                f"with params={params} (attempt {retry_state.attempt_number})."
             )
         else:
             logger.debug(f"retry_state is {retry_state}; not logging before.")
 
     @classmethod
     def _log_before_sleep(
-        cls, search_string: str, retry_state: RetryCallState | None
+        cls, url: str, params: dict | None, retry_state: RetryCallState | None
     ) -> None:
-        """Context aware logging before sleeping after a failed request."""
+        """Context aware logging before sleeping after a failed HTTP request."""
         if retry_state and retry_state.outcome:
             logger.warning(
-                f
+                f"Attempt {retry_state.attempt_number} of {cls.__name__} HTTP request "
+                f'to url="{url}" with params="{params}" '
                 f"failed with error: {retry_state.outcome.exception()}. "
                 f"Retrying in {retry_state.upcoming_sleep:.0f} seconds."
             )
         else:
             logger.debug(f"retry_state is {retry_state}; not logging before_sleep.")
 
-    def
-
-
-
+    async def http_client_get(
+        self, url: str, params: dict | None = None, headers: dict | None = None
+    ) -> httpx.Response:
+        """Performs a GET request with retries.
 
-
-
-        url
-
-
+        Args:
+            retry: The retry strategy to use.
+            url: The URL to request.
+            params: Query parameters for the request.
+            headers: HTTP headers to use for the request.
+        """
+        # Perform the request and retry if necessary. There is some context aware logging:
+        # - `before`: before the request is made (and before retrying)
+        # - `before_sleep`: if the request fails before sleeping
+        retry = get_async_retry()
+        retry.before = lambda retry_state: self._log_before(
+            url=url, params=params, retry_state=retry_state
         )
-
+        retry.before_sleep = lambda retry_state: self._log_before_sleep(
+            url=url, params=params, retry_state=retry_state
+        )
+
+        async for attempt in retry:
+            with attempt:
+                response = await self._http_client.get(
+                    url=url,
+                    params=params,
+                    headers=headers,
+                )
+                response.raise_for_status()
+                return response
+
+        # In case of not entering the for loop (for some strange reason)
+        raise RuntimeError("Retry exhausted without success")
 
 
 class SerpAPI(SearchEngine):
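The new SearchEngine.http_client_get centralizes retry logic that was previously duplicated per engine. A minimal sketch of the same pattern written against tenacity's AsyncRetrying directly; the stop/wait values below are illustrative assumptions, since the real ones live inside fraudcrawler's get_async_retry helper, which this diff does not show.

import httpx
from tenacity import (
    AsyncRetrying,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


async def get_with_retries(
    client: httpx.AsyncClient, url: str, params: dict | None = None
) -> httpx.Response:
    """GET with retries, mirroring the http_client_get pattern above."""
    retry = AsyncRetrying(
        stop=stop_after_attempt(3),  # illustrative; real values sit in get_async_retry()
        wait=wait_exponential(multiplier=1, max=10),
        retry=retry_if_exception_type(httpx.HTTPError),
        reraise=True,
    )
    # Hooks can be attached after construction, exactly as the diff does:
    retry.before = lambda retry_state: print(
        f"attempt {retry_state.attempt_number} for {url}"
    )

    async for attempt in retry:
        with attempt:
            response = await client.get(url=url, params=params)
            response.raise_for_status()  # HTTPStatusError triggers a retry
            return response

    # Defensive, mirrors the diff; normally unreachable.
    raise RuntimeError("Retry exhausted without success")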
@@ -109,7 +153,7 @@ class SerpAPI(SearchEngine):
             http_client: An httpx.AsyncClient to use for the async requests.
             api_key: The API key for SerpAPI.
         """
-        self._http_client = http_client
+        super().__init__(http_client=http_client)
         self._api_key = api_key
 
     @property
@@ -205,22 +249,10 @@ class SerpAPI(SearchEngine):
         }
         logger.debug(f"SerpAPI search with params: {params}")
 
-        # Perform the request and retry if necessary. There is some context aware logging:
-        # - `before`: before the request is made (and before retrying)
-        # - `before_sleep`: if the request fails before sleeping
-        retry = get_async_retry()
-        retry.before = lambda retry_state: self._log_before(
-            search_string=search_string, retry_state=retry_state
-        )
-        retry.before_sleep = lambda retry_state: self._log_before_sleep(
-            search_string=search_string, retry_state=retry_state
+        # Perform the search request
+        response: httpx.Response = await self.http_client_get(
+            url=self._endpoint, params=params
         )
-        async for attempt in retry:
-            with attempt:
-                response = await self._http_client.get(
-                    url=self._endpoint, params=params
-                )
-                response.raise_for_status()
 
         # Extract the URLs from the response
         data = response.json()
@@ -336,7 +368,21 @@ class SerpAPIGoogleShopping(SerpAPI):
         """
         results = data.get("shopping_results")
         if results is not None:
-            return [url for res in results if (url := res.get("product_link"))]
+            # return [url for res in results if (url := res.get("product_link"))]  # c.f. https://github.com/serpapi/public-roadmap/issues/3045
+            return [
+                url
+                for res in results
+                if (url := res.get("serpapi_immersive_product_api"))
+            ]
+        return []
+
+    @staticmethod
+    def _extract_product_urls_from_immersive_product_api(data: dict) -> List[str]:
+        """Extracts product urls from the serpapi immersive product API data."""
+        if results := data.get("product_results"):
+            stores = results.get("stores", [])
+            urls = [url for sre in stores if (url := sre.get("link"))]
+            return list(set(urls))
         return []
 
     async def search(
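The payload shape assumed by _extract_product_urls_from_immersive_product_api, as inferred from the code above rather than from SerpAPI documentation: merchant links sit under product_results.stores[].link, and duplicates are dropped via a set. A self-contained sketch with made-up sample data:

from typing import List


def extract_store_links(data: dict) -> List[str]:
    """Collect merchant links from an immersive-product-style payload."""
    results = data.get("product_results")
    if not results:
        return []
    stores = results.get("stores", [])
    urls = [url for store in stores if (url := store.get("link"))]
    return list(set(urls))  # de-duplicate; order is not preserved


sample = {
    "product_results": {
        "stores": [
            {"name": "Shop A", "link": "https://shop-a.example/product/1"},
            {"name": "Shop B", "link": "https://shop-b.example/item/42"},
            {"name": "Shop C"},  # no link, skipped by the walrus filter
        ]
    }
}
assert sorted(extract_store_links(sample)) == [
    "https://shop-a.example/product/1",
    "https://shop-b.example/item/42",
]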
@@ -349,6 +395,9 @@ class SerpAPIGoogleShopping(SerpAPI):
     ) -> List[SearchResult]:
         """Performs a google shopping search using SerpApi and returns SearchResults.
 
+        Similar to Toppreise, this method extracts merchant URLs from Google Shopping product pages
+        and creates multiple SearchResult objects for each merchant URL found.
+
         Args:
             search_term: The search term to use for the query.
             language: The language to use for the query ('hl' parameter).
@@ -362,7 +411,7 @@ class SerpAPIGoogleShopping(SerpAPI):
             marketplaces=marketplaces,
         )
 
-        # Perform the search
+        # Perform the search to get Google Shopping URLs
         urls = await self._search(
             search_string=search_string,
             language=language,
@@ -375,10 +424,10 @@ class SerpAPIGoogleShopping(SerpAPI):
         # and Google Shopping searches (see https://github.com/serpapi/public-roadmap/issues/1858)
         urls = urls[:num_results]
 
-        # Create
+        # Create SearchResult objects from merchant URLs (similar to Toppreise pattern)
         results = [self._create_search_result(url=url) for url in urls]
         logger.debug(
-            f'Produced {len(results)} results from
+            f'Produced {len(results)} results from Google Shopping search with q="{search_string}".'
         )
         return results
 
@@ -387,14 +436,6 @@ class Toppreise(SearchEngine):
     """Search engine for toppreise.ch."""
 
     _endpoint = "https://www.toppreise.ch/"
-    _headers = {
-        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
-        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
-        "Accept-Language": "en-US,en;q=0.5",
-        "Accept-Encoding": "gzip, deflate",
-        "Connection": "keep-alive",
-        "Upgrade-Insecure-Requests": "1",
-    }
 
     def __init__(self, http_client: httpx.AsyncClient, zyteapi_key: str):
         """Initializes the Toppreise client.
@@ -403,9 +444,42 @@
             http_client: An httpx.AsyncClient to use for the async requests.
             zyteapi_key: ZyteAPI key for fallback when direct access fails.
         """
-        self._http_client = http_client
+        super().__init__(http_client=http_client)
         self._zyteapi = ZyteAPI(http_client=http_client, api_key=zyteapi_key)
 
+    async def http_client_get_with_fallback(self, url: str) -> bytes:
+        """Performs a GET request with retries.
+
+        If direct access fails (e.g. 403 Forbidden), it will attempt to unblock the URL
+        content using Zyte proxy mode.
+
+        Args:
+            url: The URL to request.
+        """
+        # Try to access the URL directly
+        try:
+            response: httpx.Response = await self.http_client_get(
+                url=url, headers=self._headers
+            )
+            content = response.content
+
+        # If we get a 403 Error (can happen depending on IP/location of deployment),
+        # we try to unblock the URL using Zyte proxy mode
+        except httpx.HTTPStatusError as err_direct:
+            if err_direct.response.status_code == 403:
+                logger.warning(
+                    f"Received 403 Forbidden for {url}, attempting to unblock with Zyte proxy"
+                )
+                try:
+                    content = await self._zyteapi.unblock_url_content(url)
+                except Exception as err_resolve:
+                    msg = f'Error unblocking URL="{url}" with Zyte proxy: {err_resolve}'
+                    logger.error(msg)
+                    raise httpx.HTTPError(msg) from err_resolve
+            else:
+                raise err_direct
+        return content
+
     @classmethod
     def _get_search_endpoint(cls, language: Language) -> str:
         """Get the search endpoint based on the language."""
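The fallback flow in http_client_get_with_fallback, reduced to a sketch: try the direct GET first, and only on a 403 route the request through an unblocking proxy. unblock_via_proxy below is a hypothetical stand-in for ZyteAPI.unblock_url_content, whose internals this diff does not show.

import httpx


async def get_with_fallback(
    client: httpx.AsyncClient, url: str, headers: dict
) -> bytes:
    """Direct GET first; on 403 only, fall back to an unblocking proxy."""
    try:
        response = await client.get(url, headers=headers)
        response.raise_for_status()
        return response.content
    except httpx.HTTPStatusError as err:
        if err.response.status_code != 403:
            raise
        # 403 can depend on the IP/location of the deployment; route the
        # request through the proxy instead.
        return await unblock_via_proxy(url)


async def unblock_via_proxy(url: str) -> bytes:
    """Hypothetical stand-in for ZyteAPI.unblock_url_content."""
    raise NotImplementedError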
@@ -502,46 +576,6 @@
         """The name of the search engine."""
         return SearchEngineName.TOPPREISE.value
 
-    async def http_client_get_with_fallback(
-        self, url: str, retry: AsyncRetrying
-    ) -> bytes:
-        """Performs a GET request with retries.
-
-        If direct access fails (e.g. 403 Forbidden), it will attempt to unblock the URL
-        content using Zyte proxy mode.
-
-        Args:
-            url: The URL to request.
-            retry: The retry strategy to use.
-        """
-        # Try to access the URL directly
-        try:
-            async for attempt in retry:
-                with attempt:
-                    response = await self._http_client.get(
-                        url=url,
-                        headers=self._headers,
-                    )
-                    response.raise_for_status()
-                    content = response.content
-
-        # If we get a 403 Error (can happen depending on IP/location of deployment),
-        # we try to unblock the URL using Zyte proxy mode
-        except httpx.HTTPStatusError as err_direct:
-            if err_direct.response.status_code == 403:
-                logger.warning(
-                    f"Received 403 Forbidden for {url}, attempting to unblock with Zyte proxy"
-                )
-                try:
-                    content = await self._zyteapi.unblock_url_content(url)
-                except Exception as err_resolve:
-                    msg = f'Error unblocking URL="{url}" with Zyte proxy: {err_resolve}'
-                    logger.error(msg)
-                    raise httpx.HTTPError(msg) from err_resolve
-            else:
-                raise err_direct
-        return content
-
     async def _search(
         self, search_string: str, language: Language, num_results: int
     ) -> List[str]:
@@ -561,17 +595,8 @@
         url = f"{endpoint}?q={encoded_search}"
         logger.debug(f"Toppreise search URL: {url}")
 
-        # Perform the request and retry if necessary. There is some context aware logging:
-        # - `before`: before the request is made (and before retrying)
-        # - `before_sleep`: if the request fails before sleeping
-        retry = get_async_retry()
-        retry.before = lambda retry_state: self._log_before(
-            search_string=search_string, retry_state=retry_state
-        )
-        retry.before_sleep = lambda retry_state: self._log_before_sleep(
-            search_string=search_string, retry_state=retry_state
-        )
-        content = await self.http_client_get_with_fallback(url=url, retry=retry)
+        # Perform the request with fallback if necessary
+        content = await self.http_client_get_with_fallback(url=url)
 
         # Get external product urls from the content
         urls = self._extract_product_urls_from_search_page(content=content)
@@ -633,61 +658,44 @@ class Searcher(DomainUtils):
             zyteapi_key=zyteapi_key,
         )
 
-
-
-        """Context aware logging before the request is made."""
-        if retry_state:
-            logger.debug(
-                f'Performing post search for url="{url}" '
-                f"(attempt {retry_state.attempt_number})."
-            )
-        else:
-            logger.debug(f"retry_state is {retry_state}; not logging before.")
+    async def _post_search_google_shopping_immersive(self, url: str) -> List[str]:
+        """Post-search for product URLs from a Google Shopping immersive product page.
 
-
-
-
-
-        ""
-
-
-
-
-
-
-
+        Args:
+            url: The URL of the Google Shopping product page.
+        """
+        # Add SerpAPI key to the url
+        sep = "&" if "?" in url else "?"
+        url = f"{url}{sep}api_key={self._google_shopping._api_key}"
+
+        # Fetch the content of the Google Shopping product page
+        response = await self._google_shopping.http_client_get(url=url)
+
+        # Get external product urls from the data
+        data = response.json()
+        urls = self._google_shopping._extract_product_urls_from_immersive_product_api(
+            data=data
+        )
+        return urls
 
     async def _post_search_toppreise_comparison(self, url: str) -> List[str]:
         """Post-search for product URLs from a Toppreise product comparison page.
 
         Note:
             In comparison to the function Toppreise._search, here we extract the urls from
-            product comparison pages (f.e. https://www.toppreise.ch/preisvergleich/).
-            also be found in the results of a google search.
+            product comparison pages (f.e. https://www.toppreise.ch/preisvergleich/). These
+            pages can also be found in the results of a google search.
 
         Args:
             url: The URL of the Toppreise product listing page.
         """
-        # Perform the request and retry if necessary. There is some context aware logging:
-        # - `before`: before the request is made (and before retrying)
-        # - `before_sleep`: if the request fails before sleeping
-        retry = get_async_retry(stop_after=self._post_search_retry_stop_after)
-        retry.before = lambda retry_state: self._post_search_log_before(
-            url=url, retry_state=retry_state
-        )
-        retry.before_sleep = lambda retry_state: self._post_search_log_before_sleep(
-            url=url, retry_state=retry_state
-        )
-        content = await self._toppreise.http_client_get_with_fallback(
-            url=url, retry=retry
-        )
+        # Perform the request with fallback if necessary
+        content = await self._toppreise.http_client_get_with_fallback(url=url)
 
         # Get external product urls from the content
         urls = self._toppreise._extract_product_urls_from_comparison_page(
            content=content
         )
-
         return urls
 
     async def _post_search(self, results: List[SearchResult]) -> List[SearchResult]:
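_post_search_google_shopping_immersive appends the api_key with a manual separator check (sep = "&" if "?" in url else "?"). A more defensive alternative, shown only as a sketch and not part of the release, rebuilds the query with urllib.parse so existing parameters stay intact and properly encoded:

from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit


def add_api_key(url: str, api_key: str) -> str:
    """Append (or overwrite) an api_key query parameter safely."""
    parts = urlsplit(url)
    query = dict(parse_qsl(parts.query))
    query["api_key"] = api_key
    return urlunsplit(parts._replace(query=urlencode(query)))


assert add_api_key(
    "https://serpapi.com/search?engine=google_immersive_product", "KEY"
) == "https://serpapi.com/search?engine=google_immersive_product&api_key=KEY"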
@@ -703,9 +711,22 @@ class Searcher(DomainUtils):
         post_search_results: List[SearchResult] = []
         for res in results:
             url = res.url
+            post_search_urls: List[str] = []
+
+            # Extract embedded product URLs from the Google Shopping immersive product page
+            if "engine=google_immersive_product" in url:
+                logger.debug(
+                    f'Extracting embedded product URLs from url="{url}" found by search_engine="{res.search_engine_name}"'
+                )
+                post_search_urls = await self._post_search_google_shopping_immersive(
+                    url=url
+                )
+                logger.debug(
+                    f'Extracted {len(post_search_urls)} embedded product URLs from url="{url}".'
+                )
 
             # Extract embedded product URLs from the Toppreise product listing page
-            if any(pth in url for pth in TOPPREISE_COMPARISON_PATHS):
+            elif any(pth in url for pth in TOPPREISE_COMPARISON_PATHS):
                 logger.debug(
                     f'Extracting embedded product URLs from url="{url}" found by search_engine="{res.search_engine_name}"'
                 )
@@ -714,15 +735,16 @@ class Searcher(DomainUtils):
                     f'Extracted {len(post_search_urls)} embedded product URLs from url="{url}".'
                 )
 
-
-
-
-
-
-
-
-
-
+            # Add the extracted product URLs as SearchResult objects
+            psr = [
+                SearchResult(
+                    url=psu,
+                    domain=self._get_domain(url=psu),
+                    search_engine_name=res.search_engine_name,
+                )
+                for psu in post_search_urls
+            ]
+            post_search_results.extend(psr)
 
         return post_search_results
 
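Because _post_search can surface the same shop link from several source pages, a caller may want to deduplicate by URL. A sketch, not part of the release; the dataclass below is a minimal stand-in for fraudcrawler's SearchResult:

from dataclasses import dataclass
from typing import List


@dataclass
class SearchResult:
    """Minimal stand-in for fraudcrawler's SearchResult."""
    url: str
    domain: str
    search_engine_name: str


def dedupe_by_url(results: List[SearchResult]) -> List[SearchResult]:
    """Keep the first result seen for each URL."""
    seen: set = set()
    unique: List[SearchResult] = []
    for res in results:
        if res.url not in seen:
            seen.add(res.url)
            unique.append(res)
    return unique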
All remaining files are unchanged between 0.6.0 and 0.6.1.