yad2-scraper 0.1.0__tar.gz → 0.3.0__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
--- a/LICENSE
+++ b/LICENSE
@@ -1,21 +1,21 @@
- MIT License
-
- Copyright (c) 2025 DavOstx7
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
+ MIT License
+
+ Copyright (c) 2025 DavOstx7
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.3
  Name: yad2-scraper
- Version: 0.1.0
+ Version: 0.3.0
  Summary: Scrape Yad2 in Python.
  License: LICENSE
  Author: dav ost
@@ -13,9 +13,13 @@ Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Requires-Dist: beautifulsoup4 (>=4.11.1,<5.0.0)
  Requires-Dist: fake-useragent (>=0.1.11,<0.2.0)
+ Requires-Dist: httpcore (>=0.15.0)
  Requires-Dist: httpx (>=0.24.0,<0.25.0)
+ Requires-Dist: pydantic (>=1.10.0,<2.0.0)
  Description-Content-Type: text/markdown

  # yad2-scraper
--- a/README.md
+++ b/README.md
@@ -1,2 +1,2 @@
- # yad2-scraper
- Scrape Yad2 in Python.
+ # yad2-scraper
+ Scrape Yad2 in Python.
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,21 +1,23 @@
- [tool.poetry]
- name = "yad2-scraper"
- version = "0.1.0"
- description = "Scrape Yad2 in Python."
- authors = ["dav ost <davidost2003@gmail.com>"]
- license = "LICENSE"
- readme = "README.md"
-
- [tool.poetry.dependencies]
- python = ">=3.7"
- httpx = "^0.24.0"
- fake-useragent = "^0.1.11"
- beautifulsoup4 = "^4.11.1"
-
- [tool.poetry.dev-dependencies]
- pytest = "^6.2.2"
- respx = "^0.15.0"
-
- [build-system]
- requires = ["poetry-core"]
+ [tool.poetry]
+ name = "yad2-scraper"
+ version = "0.3.0"
+ description = "Scrape Yad2 in Python."
+ authors = ["dav ost <davidost2003@gmail.com>"]
+ license = "LICENSE"
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = ">=3.7"
+ httpx = "^0.24.0"
+ httpcore = ">=0.15.0"
+ fake-useragent = "^0.1.11"
+ beautifulsoup4 = "^4.11.1"
+ pydantic = "^1.10.0"
+
+ [tool.poetry.group.dev.dependencies]
+ pytest = "^6.2.2"
+ respx = "^0.20.0"
+
+ [build-system]
+ requires = ["poetry-core"]
  build-backend = "poetry.core.masonry.api"
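
The hunk above is the Poetry manifest (pyproject.toml). Besides the version bump, 0.3.0 adds pydantic pinned to the 1.x line, adds an httpcore floor, bumps respx, and moves the dev dependencies from the deprecated [tool.poetry.dev-dependencies] table to the [tool.poetry.group.dev.dependencies] group syntax introduced in Poetry 1.2. A minimal sketch for checking that an installed environment matches these pins:

```python
# Sketch only: verify an environment against the 0.3.0 pins above.
import httpx
import pydantic

assert pydantic.VERSION.startswith("1."), "yad2-scraper 0.3.0 expects pydantic v1"
assert httpx.__version__.startswith("0.24."), "httpx is pinned to the 0.24 line"
```
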
--- a/yad2_scraper/__init__.py
+++ b/yad2_scraper/__init__.py
@@ -1,4 +1,4 @@
- from .scraper import Yad2Scraper
- from .query import QueryFilters, OrderBy
- from .category import Yad2Category
- from .next_data import NextData
+ from .scraper import Yad2Scraper
+ from .query import QueryFilters, OrderBy
+ from .category import Yad2Category
+ from .next_data import NextData
--- a/yad2_scraper/category.py
+++ b/yad2_scraper/category.py
@@ -1,25 +1,25 @@
- import json
- from bs4 import BeautifulSoup, Tag
- from typing import Optional, List, Union, TextIO, BinaryIO
-
- from yad2_scraper.next_data import NextData
- from yad2_scraper.utils import find_all_html_tags_by_class_substring
- from yad2_scraper.constants import NEXT_DATA_SCRIPT_ID
-
-
- class Yad2Category:
-     def __init__(self, soup: BeautifulSoup):
-         self.soup = soup
-
-     @classmethod
-     def from_html_io(cls, html_io: Union[TextIO, BinaryIO]):
-         html = html_io.read()
-         soup = BeautifulSoup(html, "html.parser")
-         return cls(soup)
-
-     def load_next_data(self) -> Optional[NextData]:
-         tag = self.soup.find("script", id=NEXT_DATA_SCRIPT_ID)
-         return NextData(json.loads(tag.string)) if tag else None
-
-     def find_all_tags_by_class_substring(self, tag_name: str, substring: str) -> List[Tag]:
-         return find_all_html_tags_by_class_substring(self.soup, tag_name, substring)
+ import json
+ from bs4 import BeautifulSoup, Tag
+ from typing import Optional, List, Union, TextIO, BinaryIO
+
+ from yad2_scraper.next_data import NextData
+ from yad2_scraper.utils import find_all_html_tags_by_class_substring
+ from yad2_scraper.constants import NEXT_DATA_SCRIPT_ID
+
+
+ class Yad2Category:
+     def __init__(self, soup: BeautifulSoup):
+         self.soup = soup
+
+     @classmethod
+     def from_html_io(cls, html_io: Union[TextIO, BinaryIO]):
+         html = html_io.read()
+         soup = BeautifulSoup(html, "html.parser")
+         return cls(soup)
+
+     def load_next_data(self) -> Optional[NextData]:
+         tag = self.soup.find("script", id=NEXT_DATA_SCRIPT_ID)
+         return NextData(json.loads(tag.string)) if tag else None
+
+     def find_all_tags_by_class_substring(self, tag_name: str, substring: str) -> List[Tag]:
+         return find_all_html_tags_by_class_substring(self.soup, tag_name, substring)
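
This hunk is yad2_scraper/category.py; the removed and added bodies are textually identical, so the rewrite apparently reflects only line-ending normalization. For orientation, a minimal usage sketch (the file name and the "feed" class substring are illustrative assumptions, not part of the package):

```python
from yad2_scraper import Yad2Category

# Parse a previously saved category page ("page.html" is hypothetical).
with open("page.html", "rb") as f:
    category = Yad2Category.from_html_io(f)

# load_next_data returns None when the page has no __NEXT_DATA__ script tag.
next_data = category.load_next_data()

# Find all <div> tags whose class attribute contains a given substring.
items = category.find_all_tags_by_class_substring("div", "feed")
```
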
--- a/yad2_scraper/constants.py
+++ b/yad2_scraper/constants.py
@@ -1,26 +1,27 @@
- BASE_URL = "https://www.yad2.co.il"
-
- DEFAULT_REQUEST_HEADERS = {
-     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15",
-     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
-     "Accept-Encoding": "gzip, deflate, br",
-     "Accept-Language": "en-US,en;q=0.9",
-     "Connection": "keep-alive",
-     "Upgrade-Insecure-Requests": "1",
-     "DNT": "1",
-     "Cache-Control": "max-age=0",
-     "Sec-Fetch-Site": "none",
-     "Sec-Fetch-Mode": "navigate",
-     "Sec-Fetch-User": "?1",
-     "Sec-Fetch-Dest": "document",
- }
-
- ALLOW_REQUEST_REDIRECTS = True
- VERIFY_REQUEST_SSL = True
-
- ANTIBOT_CONTENT = b"Are you for real" # robot-captcha
-
- FIRST_PAGE = 1
- NOT_MENTIONED_PRICE_RANGE = 0, 0
-
- NEXT_DATA_SCRIPT_ID = "__NEXT_DATA__"
+ BASE_URL = "https://www.yad2.co.il"
+
+ DEFAULT_REQUEST_HEADERS = {
+     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15",
+     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
+     "Accept-Encoding": "gzip, deflate, br",
+     "Accept-Language": "en-US,en;q=0.9",
+     "Connection": "keep-alive",
+     "Upgrade-Insecure-Requests": "1",
+     "DNT": "1",
+     "Cache-Control": "max-age=0",
+     "Sec-Fetch-Site": "none",
+     "Sec-Fetch-Mode": "navigate",
+     "Sec-Fetch-User": "?1",
+     "Sec-Fetch-Dest": "document",
+ }
+
+ ALLOW_REQUEST_REDIRECTS = True
+ VERIFY_REQUEST_SSL = True
+
+ ANTIBOT_CONTENT_IDENTIFIER = b"Are you for real" # robot-captcha
+ YAD2_CONTENT_IDENTIFIER = b"https://www.yad2.co.il/"
+
+ FIRST_PAGE_NUMBER = 1
+ NOT_MENTIONED_PRICE_RANGE = 0, 0
+
+ NEXT_DATA_SCRIPT_ID = "__NEXT_DATA__"
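
In yad2_scraper/constants.py, 0.3.0 renames ANTIBOT_CONTENT to ANTIBOT_CONTENT_IDENTIFIER and FIRST_PAGE to FIRST_PAGE_NUMBER, and introduces YAD2_CONTENT_IDENTIFIER so responses can be checked for genuine Yad2 content as well as for the anti-bot page. A sketch of how the two byte markers classify a response body, mirroring the order used by Yad2Scraper._validate_response later in this diff:

```python
from yad2_scraper.constants import ANTIBOT_CONTENT_IDENTIFIER, YAD2_CONTENT_IDENTIFIER

def classify_body(body: bytes) -> str:
    # classify_body is a hypothetical helper, not part of the package.
    if ANTIBOT_CONTENT_IDENTIFIER in body:
        return "anti-bot page"
    if YAD2_CONTENT_IDENTIFIER not in body:
        return "unexpected content"
    return "yad2 content"
```
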
--- /dev/null
+++ b/yad2_scraper/exceptions.py
@@ -0,0 +1,31 @@
+ import httpx
+ from typing import List
+
+
+ class ResponseError(httpx.HTTPStatusError):
+     # This adds the request/response objects to the error
+     pass
+
+
+ class AntiBotDetectedError(ResponseError):
+     pass
+
+
+ class UnexpectedContentError(ResponseError):
+     pass
+
+
+ class MaxRetriesExceededError(Exception):
+     def __init__(self, msg: str, errors: List[Exception] = None):
+         super().__init__(msg)
+         self.errors = errors
+
+
+ class MaxRequestRetriesExceededError(MaxRetriesExceededError):
+     def __init__(self, method: str, url: str, max_retries: int, errors: List[Exception] = None):
+         self.method = method
+         self.url = url
+         self.max_retries = max_retries
+
+         msg = f"All {self.max_retries} retry attempts for {self.method} request to '{self.url}' have failed"
+         super().__init__(msg, errors)
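
yad2_scraper/exceptions.py is new in 0.3.0. ResponseError subclasses httpx.HTTPStatusError, so it carries the request and response objects; MaxRequestRetriesExceededError records the method, URL, retry budget, and the list of individual failures. A hedged handling sketch (the URL path is an assumption):

```python
import logging
from yad2_scraper import Yad2Scraper
from yad2_scraper.exceptions import AntiBotDetectedError, MaxRequestRetriesExceededError

scraper = Yad2Scraper(max_retries=2)
try:
    # Illustrative path; any Yad2 URL applies.
    response = scraper.get("https://www.yad2.co.il/realestate/forsale")
except MaxRequestRetriesExceededError as exc:
    # Raised once the retry budget is spent; the individual errors are preserved.
    logging.error("All %d retries failed; last error: %r", exc.max_retries, exc.errors[-1])
except AntiBotDetectedError as exc:
    # Propagates directly only when max_retries=0 (the default).
    logging.warning("Anti-bot page served for %s", exc.request.url)
```
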
--- a/yad2_scraper/next_data.py
+++ b/yad2_scraper/next_data.py
@@ -1,27 +1,27 @@
- from enum import Enum
- from typing import List, Union
-
-
- class Field(str, Enum):
-     ID = "id"
-     TEXT = "text"
-     ENGLISH_TEXT = "textEng"
-
-
- FieldTypes = Union[str, int]
-
-
- class NextData:
-     def __init__(self, data: dict):
-         self.data = data
-
-     @property
-     def json(self) -> dict:
-         return self.data
-
-     @property
-     def queries(self) -> List[dict]:
-         return self.data["props"]["pageProps"]["dehydratedState"]["queries"]
-
-     def __getitem__(self, item):
-         return self.data[item]
+ from enum import Enum
+ from typing import List, Union
+
+
+ class Field(str, Enum):
+     ID = "id"
+     TEXT = "text"
+     ENGLISH_TEXT = "textEng"
+
+
+ FieldTypes = Union[str, int]
+
+
+ class NextData:
+     def __init__(self, data: dict):
+         self.data = data
+
+     @property
+     def json(self) -> dict:
+         return self.data
+
+     @property
+     def queries(self) -> List[dict]:
+         return self.data["props"]["pageProps"]["dehydratedState"]["queries"]
+
+     def __getitem__(self, item):
+         return self.data[item]
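
Old and new yad2_scraper/next_data.py are textually identical as well; the rewrite again looks like line-ending normalization. NextData wraps the JSON payload of a page's __NEXT_DATA__ script: queries drills into what looks like React Query's dehydrated state, and Field enumerates key names that appear in the payload. A small sketch with a hypothetical payload:

```python
from yad2_scraper.next_data import NextData, Field

# Hypothetical minimal payload shaped like a Next.js __NEXT_DATA__ blob.
payload = {"props": {"pageProps": {"dehydratedState": {"queries": [{"queryKey": ["feed"]}]}}}}

next_data = NextData(payload)
print(next_data.queries)          # [{'queryKey': ['feed']}]
print(next_data["props"])         # __getitem__ indexes the raw dict
print(Field.ENGLISH_TEXT.value)   # 'textEng'
```
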
--- a/yad2_scraper/query.py
+++ b/yad2_scraper/query.py
@@ -1,45 +1,46 @@
- from pydantic import BaseModel
- from enum import Enum
- from typing import Optional, Tuple
-
- PriceRange = Tuple[int, int]
-
-
- class OrderBy(int, Enum):
-     DATE = 1
-     PRICE_LOWEST_TO_HIGHEST = 3
-     PRICE_HIGHEST_TO_LOWEST = 4
-     ...
-
-
- def format_number_range(number_range: Optional[Tuple[int, int]]) -> Optional[str]:
-     if number_range is None:
-         return None
-
-     try:
-         min_value, max_value = min(*number_range), max(*number_range)
-     except TypeError:
-         raise ValueError("Number range is incomplete, both values must be set")
-
-     return f"{min_value}-{max_value}"
-
-
- class QueryFilters(BaseModel):
-     page: Optional[int] = None
-     order_by: Optional[OrderBy] = None
-     price_range: Optional[PriceRange] = None
-     ...
-
-     def to_dict_raw(self) -> dict:
-         return {
-             "page": self.page,
-             "Order": self.order_by,
-             "price": format_number_range(self.price_range)
-         }
-
-     # TODO: add helper methods for managing the attribute values
-
-     def __iter__(self):
-         for key, value in self.to_dict_raw().items():
-             if value is not None:
-                 yield key, value
+ from pydantic import BaseModel
+ from enum import Enum
+ from typing import Optional, Tuple
+
+ PriceRange = Tuple[int, int]
+
+
+ class OrderBy(int, Enum):
+     DATE = 1
+     PRICE_LOWEST_TO_HIGHEST = 3
+     PRICE_HIGHEST_TO_LOWEST = 4
+     ...
+
+
+ def format_number_range(number_range: Optional[Tuple[int, int]]) -> Optional[str]:
+     if number_range is None:
+         return None
+
+     try:
+         min_value, max_value = min(*number_range), max(*number_range)
+     except TypeError:
+         raise ValueError("Number range is incomplete, both values must be set")
+
+     return f"{min_value}-{max_value}"
+
+
+ class QueryFilters(BaseModel):
+     page: Optional[int] = None
+     order_by: Optional[OrderBy] = None
+     price_range: Optional[PriceRange] = None
+     ...
+
+     def to_params(self) -> dict:
+         return {
+             "page": self.page,
+             "Order": self.order_by,
+             "price": format_number_range(self.price_range)
+         }
+
+     def to_clean_params(self):
+         return {key: value for key, value in self.to_params().items() if value is not None}
+
+     # TODO: add helper methods for managing the attribute values
+
+     def __iter__(self):
+         yield from self.to_clean_params().items()
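
In yad2_scraper/query.py, 0.3.0 renames to_dict_raw to to_params, adds to_clean_params to drop unset values, and rewrites __iter__ in terms of it; price_range is rendered as a "min-max" string by format_number_range, which normalizes the order of the two bounds. A usage sketch:

```python
from yad2_scraper import QueryFilters, OrderBy

filters = QueryFilters(
    page=2,
    order_by=OrderBy.PRICE_LOWEST_TO_HIGHEST,
    price_range=(1_000_000, 500_000),  # bound order does not matter
)

print(filters.to_params())        # {'page': 2, 'Order': <OrderBy.PRICE_LOWEST_TO_HIGHEST: 3>, 'price': '500000-1000000'}
print(filters.to_clean_params())  # identical here; keys with None values would be dropped
print(dict(filters))              # __iter__ yields the cleaned pairs, so dict() works too
```
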
--- /dev/null
+++ b/yad2_scraper/scraper.py
@@ -0,0 +1,174 @@
+ import logging
+ import httpx
+ import time
+ import random
+ from typing import Optional, Dict, Any, Tuple, Union, Type, TypeVar
+
+ from yad2_scraper.category import Yad2Category
+ from yad2_scraper.query import QueryFilters
+ from yad2_scraper.utils import get_random_user_agent
+ from yad2_scraper.exceptions import AntiBotDetectedError, UnexpectedContentError, MaxRequestRetriesExceededError
+ from yad2_scraper.constants import (
+     DEFAULT_REQUEST_HEADERS,
+     ALLOW_REQUEST_REDIRECTS,
+     VERIFY_REQUEST_SSL,
+     ANTIBOT_CONTENT_IDENTIFIER,
+     YAD2_CONTENT_IDENTIFIER
+ )
+
+ Category = TypeVar("Category", bound=Yad2Category)
+ DelayRange = Tuple[float, float]
+ QueryParamTypes = Union[QueryFilters, Dict[str, Any]]
+
+ logger = logging.getLogger(__name__)
+
+
+ class Yad2Scraper:
+     def __init__(
+         self,
+         client: Optional[httpx.Client] = None,
+         request_defaults: Optional[Dict[str, Any]] = None,
+         randomize_user_agent: bool = False,
+         random_delay_range: Optional[DelayRange] = None,
+         max_retries: int = 0
+     ):
+         self.client = client or httpx.Client(
+             headers=DEFAULT_REQUEST_HEADERS,
+             follow_redirects=ALLOW_REQUEST_REDIRECTS,
+             verify=VERIFY_REQUEST_SSL
+         )
+         self.request_defaults = request_defaults or {}
+         self.randomize_user_agent = randomize_user_agent
+         self.random_delay_range = random_delay_range
+         self.max_retries = max_retries
+
+         logger.debug(f"Scraper initialized with client: {self.client}")
+
+     def fetch_category(
+         self,
+         url: str,
+         params: Optional[QueryParamTypes] = None,
+         category_type: Type[Category] = Yad2Category
+     ) -> Category:
+         logger.debug(f"Fetching category from URL: '{url}'")
+         response = self.get(url, params)
+         logger.debug(f"Category fetched successfully from URL: '{url}'")
+         return category_type.from_html_io(response)
+
+     def get(self, url: str, params: Optional[QueryParamTypes] = None) -> httpx.Response:
+         return self.request("GET", url, params=params)
+
+     def request(self, method: str, url: str, params: Optional[QueryParamTypes] = None) -> httpx.Response:
+         request_options = self._prepare_request_options(params=params)
+
+         try:
+             return self._send_request(method, url, request_options)
+         except Exception as error:
+             return self._handle_request_error(method, url, request_options, error)
+
+     def set_user_agent(self, user_agent: str) -> None:
+         self.client.headers["User-Agent"] = user_agent
+         logger.debug(f"User-Agent client header set to: '{user_agent}'")
+
+     def set_no_script(self, no_script: bool) -> None:
+         value = "1" if no_script else "0"
+         self.client.cookies.set("noscript", value)
+         logger.debug(f"noscript client cookie set to: '{value}'")
+
+     def close(self) -> None:
+         logger.debug("Closing scraper client")
+         self.client.close()
+         logger.info("Scraper client closed")
+
+     def _send_request(self, method: str, url: str, request_options: Dict[str, Any]) -> httpx.Response:
+         if self.randomize_user_agent:
+             self._set_random_user_agent(request_options)
+
+         if self.random_delay_range:
+             self._apply_request_delay()
+
+         logger.info(f"Making {method} request to URL: '{url}'")
+         response = self.client.request(method, url, **request_options)
+         logger.debug(f"Received response with status code: {response.status_code}")
+         self._validate_response(response)
+
+         return response
+
+     def _handle_request_error(
+         self,
+         method: str,
+         url: str,
+         request_options: Dict[str, Any],
+         error: Exception
+     ) -> httpx.Response:
+         logger.error(f"{method} request to '{url}' failed: {error}")
+
+         if self.max_retries == 0:
+             raise error
+
+         return self._retry_request(method, url, request_options)
+
+     def _retry_request(self, method: str, url: str, request_options: Dict[str, Any]) -> httpx.Response:
+         logger.info(f"Retrying {method} request to '{url}' (max retries: {self.max_retries})")
+
+         errors = []
+
+         for retry_attempt in range(1, self.max_retries + 1):
+             try:
+                 logger.debug(f"Retry attempt {retry_attempt}/{self.max_retries}")
+                 return self._send_request(method, url, request_options)
+             except Exception as error:
+                 logger.warning(f"Retry attempt {retry_attempt} failed: {error}")
+                 errors.append(error)
+
+         error_to_raise = MaxRequestRetriesExceededError(method, url, self.max_retries, errors)
+         logger.error(str(error_to_raise))
+         raise error_to_raise from errors[-1]
+
+     def _prepare_request_options(self, params: Optional[QueryParamTypes] = None) -> Dict[str, Any]:
+         logger.debug("Preparing request options from defaults")
+         request_options = self.request_defaults.copy()
+
+         if params:
+             request_options.setdefault("params", {}).update(params)
+             logger.debug(f"Updated request options with query params: {params}")
+
+         return request_options
+
+     def _apply_request_delay(self):
+         delay = random.uniform(*self.random_delay_range)
+         logger.debug(f"Applying request delay of {delay:.2f} seconds")
+         time.sleep(delay)
+
+     @staticmethod
+     def _set_random_user_agent(request_options: Dict[str, str]):
+         user_agent = get_random_user_agent()
+         request_options.setdefault("headers", {})["User-Agent"] = user_agent
+         logger.debug(f"Updated request options with random User-Agent header: '{user_agent}'")
+
+     @staticmethod
+     def _validate_response(response: httpx.Response):
+         response.raise_for_status()
+
+         if ANTIBOT_CONTENT_IDENTIFIER in response.content:
+             raise AntiBotDetectedError(
+                 f"The response contains Anti-Bot content",
+                 request=response.request,
+                 response=response
+             )
+         if YAD2_CONTENT_IDENTIFIER not in response.content:
+             raise UnexpectedContentError(
+                 "The response does not contain yad2 content",
+                 request=response.request,
+                 response=response
+             )
+
+         logger.debug("Response validation succeeded")
+
+     def __enter__(self):
+         logger.debug("Entering scraper context")
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         logger.debug("Exiting scraper context")
+         self.close()
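
yad2_scraper/scraper.py is effectively a rewrite of the 0.1.0 module deleted at the end of this diff (the sdist diff apparently pairs files by full tarball path, so the rewrite shows up as an add plus a delete). The notable API changes: session/request_kwargs become client/request_defaults, requests_delay_range becomes random_delay_range, a max_retries budget wraps failures in MaxRequestRetriesExceededError, responses are validated against both content identifiers, and the class is now a context manager. An end-to-end sketch (the category URL is an assumption):

```python
from yad2_scraper import Yad2Scraper, QueryFilters, OrderBy

filters = QueryFilters(page=1, order_by=OrderBy.DATE)

# The context manager closes the underlying httpx.Client on exit.
with Yad2Scraper(
    randomize_user_agent=True,      # fresh fake-useragent header per request
    random_delay_range=(1.0, 3.0),  # sleep 1-3 seconds before each request
    max_retries=3,
) as scraper:
    # Illustrative category path; not part of the diff.
    category = scraper.fetch_category("https://www.yad2.co.il/vehicles/cars", params=filters)
    next_data = category.load_next_data()
```
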
--- a/yad2_scraper/utils.py
+++ b/yad2_scraper/utils.py
@@ -1,39 +1,28 @@
- import httpx
- from fake_useragent import FakeUserAgent
- from bs4 import BeautifulSoup, Tag
- from typing import Union, List
-
- from yad2_scraper.exceptions import AntiBotDetectedError
- from yad2_scraper.constants import ANTIBOT_CONTENT
-
- fua = FakeUserAgent()
-
-
- def get_random_user_agent() -> str:
-     return fua.random
-
-
- def join_url(url: str, path: str) -> str:
-     return url.rstrip("/") + "/" + path.lstrip("/")
-
-
- def get_parent_url(url: str) -> str:
-     if url.count("/") <= 2:
-         return url
-
-     return url.rstrip("/").rsplit("/", 1)[0]
-
-
- def validate_http_response(response: httpx.Response):
-     response.raise_for_status()
-
-     if ANTIBOT_CONTENT in response.content:
-         raise AntiBotDetectedError(f"The response contains Anti-Bot content")
-
-
- def find_html_tag_by_class_substring(e: Union[BeautifulSoup, Tag], tag_name: str, substring: str) -> Tag:
-     return e.find(tag_name, class_=lambda class_name: class_name and substring in class_name)
-
-
- def find_all_html_tags_by_class_substring(e: Union[BeautifulSoup, Tag], tag_name: str, substring: str) -> List[Tag]:
-     return e.find_all(tag_name, class_=lambda class_name: class_name and substring in class_name)
+ from fake_useragent import FakeUserAgent
+ from bs4 import BeautifulSoup, Tag
+ from typing import Union, List
+
+ fua = FakeUserAgent()
+
+
+ def get_random_user_agent() -> str:
+     return fua.random
+
+
+ def join_url(url: str, path: str) -> str:
+     return url.rstrip("/") + "/" + path.lstrip("/")
+
+
+ def get_parent_url(url: str) -> str:
+     if url.count("/") <= 2:
+         return url
+
+     return url.rstrip("/").rsplit("/", 1)[0]
+
+
+ def find_html_tag_by_class_substring(e: Union[BeautifulSoup, Tag], tag_name: str, substring: str) -> Tag:
+     return e.find(tag_name, class_=lambda class_name: class_name and substring in class_name)
+
+
+ def find_all_html_tags_by_class_substring(e: Union[BeautifulSoup, Tag], tag_name: str, substring: str) -> List[Tag]:
+     return e.find_all(tag_name, class_=lambda class_name: class_name and substring in class_name)
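
yad2_scraper/utils.py loses its httpx import and validate_http_response, whose job moved into Yad2Scraper._validate_response; the URL and BeautifulSoup helpers are unchanged. Their behavior, as a quick sketch:

```python
from yad2_scraper.utils import join_url, get_parent_url, get_random_user_agent

assert join_url("https://www.yad2.co.il/", "/vehicles/cars") == "https://www.yad2.co.il/vehicles/cars"
assert get_parent_url("https://www.yad2.co.il/vehicles/cars") == "https://www.yad2.co.il/vehicles"
assert get_parent_url("https://www.yad2.co.il") == "https://www.yad2.co.il"  # nothing above the host

print(get_random_user_agent())  # a random browser User-Agent from fake-useragent
```
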
--- a/yad2_scraper/exceptions.py
+++ /dev/null
@@ -1,2 +0,0 @@
- class AntiBotDetectedError(Exception):
-     pass
--- a/yad2_scraper/scraper.py
+++ /dev/null
@@ -1,107 +0,0 @@
- import logging
- import httpx
- import time
- import random
- from typing import Optional, Dict, Any, Tuple, Union, Type, TypeVar
-
- from yad2_scraper.category import Yad2Category
- from yad2_scraper.query import QueryFilters
- from yad2_scraper.utils import get_random_user_agent, validate_http_response
- from yad2_scraper.constants import (
-     DEFAULT_REQUEST_HEADERS,
-     ALLOW_REQUEST_REDIRECTS,
-     VERIFY_REQUEST_SSL
- )
-
- Category = TypeVar("Category", bound=Yad2Category)
- DelayRange = Tuple[float, float]
- QueryParams = Union[QueryFilters, Dict[str, Any]]
-
- logger = logging.getLogger(__name__)
-
-
- class Yad2Scraper:
-     def __init__(
-         self,
-         session: Optional[httpx.Client] = None,
-         request_kwargs: Dict[str, Any] = None,
-         randomize_user_agent: bool = False,
-         requests_delay_range: Optional[DelayRange] = None,
-     ):
-         self.session = session or httpx.Client(
-             headers=DEFAULT_REQUEST_HEADERS,
-             follow_redirects=ALLOW_REQUEST_REDIRECTS,
-             verify=VERIFY_REQUEST_SSL
-         )
-         self.request_kwargs = request_kwargs or {}
-         self.randomize_user_agent = randomize_user_agent
-         self.requests_delay_range = requests_delay_range
-
-         logger.debug(f"Initialized with session {self.session} and request kwargs: {self.request_kwargs}")
-
-     def set_user_agent(self, user_agent: str):
-         self.session.headers["User-Agent"] = user_agent
-         logger.debug(f"User-Agent session header set to: '{user_agent}'")
-
-     def set_no_script(self, no_script: bool):
-         value = '1' if no_script else '0' # str(int(no_script))
-         self.session.cookies.set("noscript", value)
-         logger.debug(f"NoScript session cookie set to: '{value}'")
-
-     def fetch_category(
-         self,
-         url: str,
-         query_params: Optional[QueryParams] = None,
-         category_type: Type[Category] = Yad2Category
-     ) -> Category:
-         logger.debug(f"Fetching category from URL: '{url}'")
-         response = self.get(url, query_params)
-         logger.debug(f"Category fetched successfully from URL: '{url}'")
-         return category_type.from_html_io(response)
-
-     def get(self, url: str, query_params: Optional[QueryParams] = None) -> httpx.Response:
-         return self.request("GET", url, query_params=query_params)
-
-     def request(self, method: str, url: str, query_params: Optional[QueryParams] = None) -> httpx.Response:
-         request_kwargs = self._prepare_request_kwargs(query_params=query_params)
-
-         if self.requests_delay_range:
-             self._apply_request_delay()
-
-         try:
-             logger.info(f"Making {method} request to URL: '{url}'") # request kwargs not logged - may be sensitive
-             response = self.session.request(method, url, **request_kwargs)
-             logger.debug(f"Received response with status code: {response.status_code}")
-
-             validate_http_response(response)
-             logger.debug("Response validation succeeded")
-         except Exception as error:
-             logger.error(f"Request to '{url}' failed: {error}")
-             raise error
-
-         return response
-
-     def _prepare_request_kwargs(self, query_params: Optional[QueryParams] = None) -> Dict[str, Any]:
-         logger.debug("Preparing request kwargs from defaults")
-         request_kwargs = self.request_kwargs.copy()
-
-         if query_params:
-             request_kwargs.setdefault("params", {}).update(query_params)
-             logger.debug(f"Updated request kwargs with query params: {query_params}")
-
-         if self.randomize_user_agent:
-             random_user_agent = get_random_user_agent()
-             request_kwargs.setdefault("headers", {})["User-Agent"] = random_user_agent
-             logger.debug(f"Updated request kwargs with random 'User-Agent' header: '{random_user_agent}'")
-
-         return request_kwargs
-
-     def _apply_request_delay(self):
-         delay = random.uniform(*self.requests_delay_range)
-         logger.debug(f"Applying request delay of {delay:.2f} seconds")
-         time.sleep(delay)
-
-     def close(self):
-         logger.debug("Closing scraper session")
-         self.session.close()
-         logger.info("Scraper session closed")