scrapling 0.2.98__py3-none-any.whl → 0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. scrapling/__init__.py +18 -31
  2. scrapling/cli.py +818 -20
  3. scrapling/core/_html_utils.py +348 -0
  4. scrapling/core/_types.py +34 -17
  5. scrapling/core/ai.py +611 -0
  6. scrapling/core/custom_types.py +183 -100
  7. scrapling/core/mixins.py +27 -19
  8. scrapling/core/shell.py +647 -0
  9. scrapling/core/{storage_adaptors.py → storage.py} +41 -33
  10. scrapling/core/translator.py +20 -26
  11. scrapling/core/utils.py +49 -54
  12. scrapling/engines/__init__.py +15 -6
  13. scrapling/engines/_browsers/__init__.py +2 -0
  14. scrapling/engines/_browsers/_camoufox.py +745 -0
  15. scrapling/engines/_browsers/_config_tools.py +130 -0
  16. scrapling/engines/_browsers/_controllers.py +630 -0
  17. scrapling/engines/_browsers/_page.py +93 -0
  18. scrapling/engines/_browsers/_validators.py +150 -0
  19. scrapling/engines/constants.py +101 -88
  20. scrapling/engines/static.py +667 -110
  21. scrapling/engines/toolbelt/__init__.py +20 -6
  22. scrapling/engines/toolbelt/bypasses/playwright_fingerprint.js +2 -1
  23. scrapling/engines/toolbelt/convertor.py +254 -0
  24. scrapling/engines/toolbelt/custom.py +205 -186
  25. scrapling/engines/toolbelt/fingerprints.py +32 -46
  26. scrapling/engines/toolbelt/navigation.py +68 -39
  27. scrapling/fetchers.py +255 -260
  28. scrapling/parser.py +781 -449
  29. scrapling-0.3.dist-info/METADATA +409 -0
  30. scrapling-0.3.dist-info/RECORD +41 -0
  31. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info}/WHEEL +1 -1
  32. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info}/top_level.txt +0 -1
  33. scrapling/defaults.py +0 -19
  34. scrapling/engines/camo.py +0 -299
  35. scrapling/engines/pw.py +0 -428
  36. scrapling/engines/toolbelt/bypasses/pdf_viewer.js +0 -5
  37. scrapling-0.2.98.dist-info/METADATA +0 -867
  38. scrapling-0.2.98.dist-info/RECORD +0 -49
  39. tests/__init__.py +0 -1
  40. tests/fetchers/__init__.py +0 -1
  41. tests/fetchers/async/__init__.py +0 -0
  42. tests/fetchers/async/test_camoufox.py +0 -95
  43. tests/fetchers/async/test_httpx.py +0 -83
  44. tests/fetchers/async/test_playwright.py +0 -99
  45. tests/fetchers/sync/__init__.py +0 -0
  46. tests/fetchers/sync/test_camoufox.py +0 -68
  47. tests/fetchers/sync/test_httpx.py +0 -82
  48. tests/fetchers/sync/test_playwright.py +0 -87
  49. tests/fetchers/test_utils.py +0 -97
  50. tests/parser/__init__.py +0 -0
  51. tests/parser/test_automatch.py +0 -111
  52. tests/parser/test_general.py +0 -330
  53. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info}/entry_points.txt +0 -0
  54. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info/licenses}/LICENSE +0 -0
scrapling/engines/toolbelt/custom.py
@@ -1,19 +1,29 @@
 """
 Functions related to custom types or type checking
 """
-import inspect
+
 from email.message import Message
 
-from scrapling.core._types import (Any, Callable, Dict, List, Optional, Tuple,
-                                   Type, Union)
+from scrapling.core._types import (
+    Any,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+)
 from scrapling.core.custom_types import MappingProxyType
 from scrapling.core.utils import log, lru_cache
-from scrapling.parser import Adaptor, SQLiteStorageSystem
+from scrapling.parser import Selector, SQLiteStorageSystem
 
 
 class ResponseEncoding:
     __DEFAULT_ENCODING = "utf-8"
-    __ISO_8859_1_CONTENT_TYPES = {"text/plain", "text/html", "text/css", "text/javascript"}
+    __ISO_8859_1_CONTENT_TYPES = {
+        "text/plain",
+        "text/html",
+        "text/css",
+        "text/javascript",
+    }
 
     @classmethod
     @lru_cache(maxsize=128)
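
Note on this hunk: the 0.2.x parser entry point `Adaptor` is renamed to `Selector` in 0.3, and the now-unused `inspect` import disappears along with the engine checks deleted further down. A minimal migration sketch (hypothetical markup; assumes `Selector` can still be built standalone and that `css_first` keeps its 0.2.x behavior):

    # 0.2.x: from scrapling.parser import Adaptor
    from scrapling.parser import Selector  # 0.3 name

    page = Selector(content="<html><body><h1>Hello</h1></body></html>")
    print(page.css_first("h1::text"))  # Hello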
@@ -27,19 +37,21 @@ class ResponseEncoding:
         """
         # Create a Message object and set the Content-Type header then get the content type and parameters
         msg = Message()
-        msg['content-type'] = header_value
+        msg["content-type"] = header_value
 
         content_type = msg.get_content_type()
         params = dict(msg.get_params(failobj=[]))
 
         # Remove the content-type from params if present somehow
-        params.pop('content-type', None)
+        params.pop("content-type", None)
 
         return content_type, params
 
     @classmethod
     @lru_cache(maxsize=128)
-    def get_value(cls, content_type: Optional[str], text: Optional[str] = 'test') -> str:
+    def get_value(
+        cls, content_type: Optional[str], text: Optional[str] = "test"
+    ) -> str:
         """Determine the appropriate character encoding from a content-type header.
 
         The encoding is determined by these rules in order:
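
The `Message`-based parsing in this hunk is a stdlib trick: assigning a Content-Type header to an empty `email.message.Message` gives RFC-compliant splitting of the MIME type and its parameters without writing a manual parser. A standalone illustration:

    from email.message import Message

    msg = Message()
    msg["content-type"] = "text/html; charset=UTF-8"
    print(msg.get_content_type())            # text/html
    print(dict(msg.get_params(failobj=[])))  # {'text/html': '', 'charset': 'UTF-8'}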
@@ -72,7 +84,9 @@ class ResponseEncoding:
                 encoding = cls.__DEFAULT_ENCODING
 
             if encoding:
-                _ = text.encode(encoding)  # Validate encoding and validate it can encode the given text
+                _ = text.encode(
+                    encoding
+                )  # Validate encoding and validate it can encode the given text
                 return encoding
 
         return cls.__DEFAULT_ENCODING
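
The trial `text.encode(encoding)` above does double duty: it raises `LookupError` for unknown codec labels and `UnicodeEncodeError` for codecs that cannot represent the sampled text, so only usable encodings are returned. The same idea as a self-contained sketch (the helper name is illustrative, not scrapling API):

    def is_usable_encoding(encoding: str, sample: str = "test") -> bool:
        """Hypothetical helper: True only if `encoding` exists and can encode `sample`."""
        try:
            sample.encode(encoding)
            return True
        except (LookupError, UnicodeEncodeError):
            return False

    print(is_usable_encoding("utf-8"))        # True
    print(is_usable_encoding("not-a-codec"))  # False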
@@ -81,136 +95,209 @@ class ResponseEncoding:
             return cls.__DEFAULT_ENCODING
 
 
-class Response(Adaptor):
+class Response(Selector):
     """This class is returned by all engines as a way to unify response type between different libraries."""
 
-    def __init__(self, url: str, text: str, body: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict,
-                 encoding: str = 'utf-8', method: str = 'GET', history: List = None, **adaptor_arguments: Dict):
-        automatch_domain = adaptor_arguments.pop('automatch_domain', None)
+    def __init__(
+        self,
+        url: str,
+        content: str | bytes,
+        status: int,
+        reason: str,
+        cookies: Tuple[Dict[str, str], ...] | Dict[str, str],
+        headers: Dict,
+        request_headers: Dict,
+        encoding: str = "utf-8",
+        method: str = "GET",
+        history: List = None,
+        **selector_config: Dict,
+    ):
+        adaptive_domain = selector_config.pop("adaptive_domain", None)
         self.status = status
         self.reason = reason
         self.cookies = cookies
         self.headers = headers
         self.request_headers = request_headers
         self.history = history or []
-        encoding = ResponseEncoding.get_value(encoding, text)
-        super().__init__(text=text, body=body, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
-        # For back-ward compatibility
-        self.adaptor = self
+        encoding = ResponseEncoding.get_value(
+            encoding, content.decode("utf-8") if isinstance(content, bytes) else content
+        )
+        super().__init__(
+            content=content,
+            url=adaptive_domain or url,
+            encoding=encoding,
+            **selector_config,
+        )
         # For easier debugging while working from a Python shell
-        log.info(f'Fetched ({status}) <{method} {url}> (referer: {request_headers.get("referer")})')
-
-    # def __repr__(self):
-    #     return f'<{self.__class__.__name__} [{self.status} {self.reason}]>'
+        log.info(
+            f"Fetched ({status}) <{method} {url}> (referer: {request_headers.get('referer')})"
+        )
 
 
 class BaseFetcher:
-    def __init__(
-        self, huge_tree: bool = True, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = True,
-        storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None,
-        automatch_domain: Optional[str] = None, keep_cdata: Optional[bool] = False,
-    ):
-        """Arguments below are the same from the Adaptor class so you can pass them directly, the rest of Adaptor's arguments
-        are detected and passed automatically from the Fetcher based on the response for accessibility.
-
-        :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
-            libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
-        :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
-        :param keep_cdata: While parsing the HTML body, drop cdata or not. Disabled by default for cleaner HTML.
-        :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
-            priority over all auto-match related arguments/functions in the class.
-        :param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
-        :param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
-            If empty, default values will be used.
-        :param automatch_domain: For cases where you want to automatch selectors across different websites as if they were on the same website, use this argument to unify them.
-            Otherwise, the domain of the request is used by default.
+    __slots__ = ()
+    huge_tree: bool = True
+    adaptive: Optional[bool] = False
+    storage: Any = SQLiteStorageSystem
+    keep_cdata: Optional[bool] = False
+    storage_args: Optional[Dict] = None
+    keep_comments: Optional[bool] = False
+    adaptive_domain: Optional[str] = None
+    parser_keywords: Tuple = (
+        "huge_tree",
+        "adaptive",
+        "storage",
+        "keep_cdata",
+        "storage_args",
+        "keep_comments",
+        "adaptive_domain",
+    )  # Left open for the user
+
+    def __init__(self, *args, **kwargs):
+        # For backward-compatibility before 0.2.99
+        args_str = ", ".join(args) or ""
+        kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items()) or ""
+        if args_str:
+            args_str += ", "
+
+        log.warning(
+            f"This logic is deprecated now, and have no effect; It will be removed with v0.3. Use `{self.__class__.__name__}.configure({args_str}{kwargs_str})` instead before fetching"
+        )
+        pass
+
+    @classmethod
+    def display_config(cls):
+        return dict(
+            huge_tree=cls.huge_tree,
+            keep_comments=cls.keep_comments,
+            keep_cdata=cls.keep_cdata,
+            adaptive=cls.adaptive,
+            storage=cls.storage,
+            storage_args=cls.storage_args,
+            adaptive_domain=cls.adaptive_domain,
+        )
+
+    @classmethod
+    def configure(cls, **kwargs):
+        """Set multiple arguments for the parser at once globally
+
+        :param kwargs: The keywords can be any arguments of the following: huge_tree, keep_comments, keep_cdata, adaptive, storage, storage_args, adaptive_domain
         """
-        # Adaptor class parameters
-        # I won't validate Adaptor's class parameters here again, I will leave it to be validated later
-        self.adaptor_arguments = dict(
-            huge_tree=huge_tree,
-            keep_comments=keep_comments,
-            keep_cdata=keep_cdata,
-            auto_match=auto_match,
-            storage=storage,
-            storage_args=storage_args
+        for key, value in kwargs.items():
+            key = key.strip().lower()
+            if hasattr(cls, key):
+                if key in cls.parser_keywords:
+                    setattr(cls, key, value)
+                else:
+                    # Yup, no fun allowed LOL
+                    raise AttributeError(
+                        f'Unknown parser argument: "{key}"; maybe you meant {cls.parser_keywords}?'
+                    )
+            else:
+                raise ValueError(
+                    f'Unknown parser argument: "{key}"; maybe you meant {cls.parser_keywords}?'
+                )
+
+        if not kwargs:
+            raise AttributeError(
+                f"You must pass a keyword to configure, current keywords: {cls.parser_keywords}?"
+            )
+
+    @classmethod
+    def _generate_parser_arguments(cls) -> Dict:
+        # Selector class parameters
+        # I won't validate Selector's class parameters here again, I will leave it to be validated later
+        parser_arguments = dict(
+            huge_tree=cls.huge_tree,
+            keep_comments=cls.keep_comments,
+            keep_cdata=cls.keep_cdata,
+            adaptive=cls.adaptive,
+            storage=cls.storage,
+            storage_args=cls.storage_args,
         )
-        if automatch_domain:
-            if type(automatch_domain) is not str:
-                log.warning('[Ignored] The argument "automatch_domain" must be of string type')
+        if cls.adaptive_domain:
+            if not isinstance(cls.adaptive_domain, str):
+                log.warning(
+                    '[Ignored] The argument "adaptive_domain" must be of string type'
+                )
             else:
-                self.adaptor_arguments.update({'automatch_domain': automatch_domain})
+                parser_arguments.update({"adaptive_domain": cls.adaptive_domain})
+
+        return parser_arguments
 
 
 class StatusText:
     """A class that gets the status text of response status code.
 
-    Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
+    Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
     """
-    _phrases = MappingProxyType({
-        100: "Continue",
-        101: "Switching Protocols",
-        102: "Processing",
-        103: "Early Hints",
-        200: "OK",
-        201: "Created",
-        202: "Accepted",
-        203: "Non-Authoritative Information",
-        204: "No Content",
-        205: "Reset Content",
-        206: "Partial Content",
-        207: "Multi-Status",
-        208: "Already Reported",
-        226: "IM Used",
-        300: "Multiple Choices",
-        301: "Moved Permanently",
-        302: "Found",
-        303: "See Other",
-        304: "Not Modified",
-        305: "Use Proxy",
-        307: "Temporary Redirect",
-        308: "Permanent Redirect",
-        400: "Bad Request",
-        401: "Unauthorized",
-        402: "Payment Required",
-        403: "Forbidden",
-        404: "Not Found",
-        405: "Method Not Allowed",
-        406: "Not Acceptable",
-        407: "Proxy Authentication Required",
-        408: "Request Timeout",
-        409: "Conflict",
-        410: "Gone",
-        411: "Length Required",
-        412: "Precondition Failed",
-        413: "Payload Too Large",
-        414: "URI Too Long",
-        415: "Unsupported Media Type",
-        416: "Range Not Satisfiable",
-        417: "Expectation Failed",
-        418: "I'm a teapot",
-        421: "Misdirected Request",
-        422: "Unprocessable Entity",
-        423: "Locked",
-        424: "Failed Dependency",
-        425: "Too Early",
-        426: "Upgrade Required",
-        428: "Precondition Required",
-        429: "Too Many Requests",
-        431: "Request Header Fields Too Large",
-        451: "Unavailable For Legal Reasons",
-        500: "Internal Server Error",
-        501: "Not Implemented",
-        502: "Bad Gateway",
-        503: "Service Unavailable",
-        504: "Gateway Timeout",
-        505: "HTTP Version Not Supported",
-        506: "Variant Also Negotiates",
-        507: "Insufficient Storage",
-        508: "Loop Detected",
-        510: "Not Extended",
-        511: "Network Authentication Required"
-    })
+
+    _phrases = MappingProxyType(
+        {
+            100: "Continue",
+            101: "Switching Protocols",
+            102: "Processing",
+            103: "Early Hints",
+            200: "OK",
+            201: "Created",
+            202: "Accepted",
+            203: "Non-Authoritative Information",
+            204: "No Content",
+            205: "Reset Content",
+            206: "Partial Content",
+            207: "Multi-Status",
+            208: "Already Reported",
+            226: "IM Used",
+            300: "Multiple Choices",
+            301: "Moved Permanently",
+            302: "Found",
+            303: "See Other",
+            304: "Not Modified",
+            305: "Use Proxy",
+            307: "Temporary Redirect",
+            308: "Permanent Redirect",
+            400: "Bad Request",
+            401: "Unauthorized",
+            402: "Payment Required",
+            403: "Forbidden",
+            404: "Not Found",
+            405: "Method Not Allowed",
+            406: "Not Acceptable",
+            407: "Proxy Authentication Required",
+            408: "Request Timeout",
+            409: "Conflict",
+            410: "Gone",
+            411: "Length Required",
+            412: "Precondition Failed",
+            413: "Payload Too Large",
+            414: "URI Too Long",
+            415: "Unsupported Media Type",
+            416: "Range Not Satisfiable",
+            417: "Expectation Failed",
+            418: "I'm a teapot",
+            421: "Misdirected Request",
+            422: "Unprocessable Entity",
+            423: "Locked",
+            424: "Failed Dependency",
+            425: "Too Early",
+            426: "Upgrade Required",
+            428: "Precondition Required",
+            429: "Too Many Requests",
+            431: "Request Header Fields Too Large",
+            451: "Unavailable For Legal Reasons",
+            500: "Internal Server Error",
+            501: "Not Implemented",
+            502: "Bad Gateway",
+            503: "Service Unavailable",
+            504: "Gateway Timeout",
+            505: "HTTP Version Not Supported",
+            506: "Variant Also Negotiates",
+            507: "Insufficient Storage",
+            508: "Loop Detected",
+            510: "Not Extended",
+            511: "Network Authentication Required",
+        }
+    )
 
     @classmethod
     @lru_cache(maxsize=128)
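
The upshot of this hunk: per-instance parser arguments on `BaseFetcher` are gone. Configuration now lives in class attributes, `configure()` validates keys against `parser_keywords` and sets them globally, and `_generate_parser_arguments()` feeds them to the parser; `Response` likewise takes one `content: str | bytes` argument instead of separate `text`/`body`, and drops the `self.adaptor` backward-compatibility alias. A minimal usage sketch (assuming `Fetcher` in scrapling.fetchers still subclasses `BaseFetcher`, as in 0.2.x):

    from scrapling.fetchers import Fetcher

    # Applies to every fetch from here on; unknown keys raise AttributeError/ValueError
    Fetcher.configure(keep_comments=True, adaptive=True)
    print(Fetcher.display_config())  # dict of the current class-level defaults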
@@ -219,32 +306,6 @@ class StatusText:
         return cls._phrases.get(status_code, "Unknown Status Code")
 
 
-def check_if_engine_usable(engine: Callable) -> Union[Callable, None]:
-    """This function check if the passed engine can be used by a Fetcher-type class or not.
-
-    :param engine: The engine class itself
-    :return: The engine class again if all checks out, otherwise raises error
-    :raise TypeError: If engine class don't have fetch method, If engine class have fetch attribute not method, or If engine class have fetch function but it doesn't take arguments
-    """
-    # if isinstance(engine, type):
-    #     raise TypeError("Expected an engine instance, not a class definition of the engine")
-
-    if hasattr(engine, 'fetch'):
-        fetch_function = getattr(engine, "fetch")
-        if callable(fetch_function):
-            if len(inspect.signature(fetch_function).parameters) > 0:
-                return engine
-            else:
-                # raise TypeError("Engine class instance must have a callable method 'fetch' with the first argument used for the url.")
-                raise TypeError("Engine class must have a callable method 'fetch' with the first argument used for the url.")
-        else:
-            # raise TypeError("Invalid engine instance! Engine class must have a callable method 'fetch'")
-            raise TypeError("Invalid engine class! Engine class must have a callable method 'fetch'")
-    else:
-        # raise TypeError("Invalid engine instance! Engine class must have the method 'fetch'")
-        raise TypeError("Invalid engine class! Engine class must have the method 'fetch'")
-
-
 def get_variable_name(var: Any) -> Optional[str]:
     """Get the name of a variable using global and local scopes.
     :param var: The variable to find the name for
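
The surviving context line shows the whole lookup strategy: `dict.get` with a fallback over the read-only `_phrases` mapping, memoized with `lru_cache`. The same pattern with the stdlib proxy type:

    from types import MappingProxyType

    phrases = MappingProxyType({200: "OK", 404: "Not Found"})
    print(phrases.get(404, "Unknown Status Code"))  # Not Found
    print(phrases.get(599, "Unknown Status Code"))  # Unknown Status Code

`check_if_engine_usable` is deleted outright; its `inspect.signature` call was the only use of the `inspect` import removed in the first hunk.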
@@ -255,45 +316,3 @@ def get_variable_name(var: Any) -> Optional[str]:
         if value is var:
             return name
     return None
-
-
-def check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any = None, critical: bool = False, param_name: Optional[str] = None) -> Any:
-    """Check if a variable matches the specified type constraints.
-    :param variable: The variable to check
-    :param valid_types: List of valid types for the variable
-    :param default_value: Value to return if type check fails
-    :param critical: If True, raises TypeError instead of logging error
-    :param param_name: Optional parameter name for error messages
-    :return: The original variable if valid, default_value if invalid
-    :raise TypeError: If critical=True and type check fails
-    """
-    # Use provided param_name or try to get it automatically
-    var_name = param_name or get_variable_name(variable) or "Unknown"
-
-    # Convert valid_types to a list if None
-    valid_types = valid_types or []
-
-    # Handle None value
-    if variable is None:
-        if type(None) in valid_types:
-            return variable
-        error_msg = f'Argument "{var_name}" cannot be None'
-        if critical:
-            raise TypeError(error_msg)
-        log.error(f'[Ignored] {error_msg}')
-        return default_value
-
-    # If no valid_types specified and variable has a value, return it
-    if not valid_types:
-        return variable
-
-    # Check if variable type matches any of the valid types
-    if not any(isinstance(variable, t) for t in valid_types):
-        type_names = [t.__name__ for t in valid_types]
-        error_msg = f'Argument "{var_name}" must be of type {" or ".join(type_names)}'
-        if critical:
-            raise TypeError(error_msg)
-        log.error(f'[Ignored] {error_msg}')
-        return default_value
-
-    return variable
scrapling/engines/toolbelt/fingerprints.py
@@ -2,19 +2,20 @@
 Functions related to generating headers and fingerprints generally
 """
 
-import platform
+from platform import system as platform_system
 
-from browserforge.fingerprints import Fingerprint, FingerprintGenerator
-from browserforge.headers import Browser, HeaderGenerator
 from tldextract import extract
+from browserforge.headers import Browser, HeaderGenerator
 
-from scrapling.core._types import Dict, Union
+from scrapling.core._types import Dict, Optional
 from scrapling.core.utils import lru_cache
 
+__OS_NAME__ = platform_system()
+
 
 @lru_cache(10, typed=True)
 def generate_convincing_referer(url: str) -> str:
-    """Takes the domain from the URL without the subdomain/suffix and make it look like you were searching google for this website
+    """Takes the domain from the URL without the subdomain/suffix and make it look like you were searching Google for this website
 
     >>> generate_convincing_referer('https://www.somewebsite.com/blah')
     'https://www.google.com/search?q=somewebsite'
@@ -23,59 +24,44 @@ def generate_convincing_referer(url: str) -> str:
     :return: Google's search URL of the domain name
     """
     website_name = extract(url).domain
-    return f'https://www.google.com/search?q={website_name}'
+    return f"https://www.google.com/search?q={website_name}"
 
 
 @lru_cache(1, typed=True)
-def get_os_name() -> Union[str, None]:
+def get_os_name() -> Optional[str]:
     """Get the current OS name in the same format needed for browserforge
 
     :return: Current OS name or `None` otherwise
     """
-    #
-    os_name = platform.system()
     return {
-        'Linux': 'linux',
-        'Darwin': 'macos',
-        'Windows': 'windows',
-        # For the future? because why not
-        'iOS': 'ios',
-    }.get(os_name)
-
-
-def generate_suitable_fingerprint() -> Fingerprint:
-    """Generates a browserforge's fingerprint that matches current OS, desktop device, and Chrome with version 128 at least.
-
-    This function was originally created to test Browserforge's injector.
-    :return: `Fingerprint` object
-    """
-    return FingerprintGenerator(
-        browser=[Browser(name='chrome', min_version=128)],
-        os=get_os_name(),  # None is ignored
-        device='desktop'
-    ).generate()
+        "Linux": "linux",
+        "Darwin": "macos",
+        "Windows": "windows",
+        # For the future? because why not?
+        "iOS": "ios",
+    }.get(__OS_NAME__)
 
 
 def generate_headers(browser_mode: bool = False) -> Dict:
     """Generate real browser-like headers using browserforge's generator
 
-    :param browser_mode: If enabled, the headers created are used for playwright so it have to match everything
+    :param browser_mode: If enabled, the headers created are used for playwright, so it has to match everything
     :return: A dictionary of the generated headers
     """
-    if browser_mode:
-        # In this mode we don't care about anything other than matching the OS and the browser type with the browser we are using
-        # So we don't raise any inconsistency red flags while websites fingerprinting us
-        os_name = get_os_name()
-        return HeaderGenerator(
-            browser=[Browser(name='chrome', min_version=130)],
-            os=os_name,  # None is ignored
-            device='desktop'
-        ).generate()
-    else:
-        # Here it's used for normal requests that aren't done through browsers so we can take it lightly
-        browsers = [
-            Browser(name='chrome', min_version=120),
-            Browser(name='firefox', min_version=120),
-            Browser(name='edge', min_version=120),
-        ]
-        return HeaderGenerator(browser=browsers, device='desktop').generate()
+    # In the browser mode, we don't care about anything other than matching the OS and the browser type with the browser we are using,
+    # So we don't raise any inconsistency red flags while websites fingerprinting us
+    os_name = get_os_name()
+    browsers = [Browser(name="chrome", min_version=130)]
+    if not browser_mode:
+        os_name = ("windows", "macos", "linux")
+        browsers.extend(
+            [
+                Browser(name="firefox", min_version=130),
+                Browser(name="edge", min_version=130),
+            ]
+        )
+
+    return HeaderGenerator(browser=browsers, os=os_name, device="desktop").generate()
+
+
+__default_useragent__ = generate_headers(browser_mode=False).get("User-Agent")
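
The rewritten generator always seeds Chrome >= 130, then widens the pool with Firefox/Edge and a cross-OS tuple when `browser_mode` is off, instead of the old two-branch version with separate `HeaderGenerator` calls; `__default_useragent__` caches one User-Agent at import time. Usage sketch (browserforge output varies per call, so only the shape is stable):

    from scrapling.engines.toolbelt.fingerprints import generate_headers

    headers = generate_headers(browser_mode=False)  # random desktop OS/browser mix
    print(headers.get("User-Agent"))

    strict = generate_headers(browser_mode=True)    # pinned to host OS and Chrome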