webscout 2025.10.11-py3-none-any.whl → 2025.10.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (47)
  1. webscout/Provider/Andi.py +1 -1
  2. webscout/Provider/ChatGPTClone.py +2 -1
  3. webscout/__init__.py +1 -4
  4. webscout/auth/routes.py +2 -3
  5. webscout/cli.py +1 -1
  6. webscout/search/__init__.py +51 -0
  7. webscout/search/base.py +195 -0
  8. webscout/search/duckduckgo_main.py +54 -0
  9. webscout/search/engines/__init__.py +48 -0
  10. webscout/search/engines/bing.py +84 -0
  11. webscout/search/engines/bing_news.py +52 -0
  12. webscout/search/engines/brave.py +43 -0
  13. webscout/search/engines/duckduckgo/__init__.py +25 -0
  14. webscout/search/engines/duckduckgo/answers.py +78 -0
  15. webscout/search/engines/duckduckgo/base.py +187 -0
  16. webscout/search/engines/duckduckgo/images.py +97 -0
  17. webscout/search/engines/duckduckgo/maps.py +168 -0
  18. webscout/search/engines/duckduckgo/news.py +68 -0
  19. webscout/search/engines/duckduckgo/suggestions.py +21 -0
  20. webscout/search/engines/duckduckgo/text.py +211 -0
  21. webscout/search/engines/duckduckgo/translate.py +47 -0
  22. webscout/search/engines/duckduckgo/videos.py +63 -0
  23. webscout/search/engines/duckduckgo/weather.py +74 -0
  24. webscout/search/engines/mojeek.py +37 -0
  25. webscout/search/engines/wikipedia.py +56 -0
  26. webscout/search/engines/yahoo.py +65 -0
  27. webscout/search/engines/yahoo_news.py +64 -0
  28. webscout/search/engines/yandex.py +43 -0
  29. webscout/search/engines/yep/__init__.py +13 -0
  30. webscout/search/engines/yep/base.py +32 -0
  31. webscout/search/engines/yep/images.py +99 -0
  32. webscout/search/engines/yep/suggestions.py +35 -0
  33. webscout/search/engines/yep/text.py +114 -0
  34. webscout/search/http_client.py +156 -0
  35. webscout/search/results.py +137 -0
  36. webscout/search/yep_main.py +44 -0
  37. webscout/version.py +1 -1
  38. webscout/version.py.bak +2 -0
  39. {webscout-2025.10.11.dist-info → webscout-2025.10.13.dist-info}/METADATA +3 -4
  40. {webscout-2025.10.11.dist-info → webscout-2025.10.13.dist-info}/RECORD +44 -15
  41. webscout/webscout_search.py +0 -1183
  42. webscout/webscout_search_async.py +0 -649
  43. webscout/yep_search.py +0 -346
  44. {webscout-2025.10.11.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
  45. {webscout-2025.10.11.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
  46. {webscout-2025.10.11.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
  47. {webscout-2025.10.11.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0
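
The headline change in this release is structural: the monolithic top-level search modules (webscout/webscout_search.py, webscout/webscout_search_async.py, webscout/yep_search.py) are deleted and replaced by a new webscout/search/ package with per-engine submodules. For callers of the removed Yep module, a minimal migration sketch follows; only the old location is confirmed by this diff, while the new import path and re-exported name are assumptions based on the added file webscout/search/yep_main.py.

# Hypothetical migration sketch: the old path (webscout/yep_search.py, deleted below)
# is shown in this diff; the new path and the YepSearch export are assumed from the
# added file webscout/search/yep_main.py and are not confirmed here.
try:
    # Layout up to 2025.10.11: YepSearch lived in a top-level module.
    from webscout.yep_search import YepSearch
except ImportError:
    # Layout from 2025.10.13 onward: the code moved under webscout/search/ (assumed).
    from webscout.search.yep_main import YepSearch

yep = YepSearch(timeout=20)
print(yep.suggestions("python"))
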
webscout/yep_search.py DELETED
@@ -1,346 +0,0 @@
-# Import trio before curl_cffi to prevent eventlet socket monkey-patching conflicts
-# See: https://github.com/python-trio/trio/issues/3015
-try:
-    import trio # noqa: F401
-except ImportError:
-    pass # trio is optional, ignore if not available
-from concurrent.futures import ThreadPoolExecutor
-from typing import Dict, List, Optional
-from urllib.parse import urlencode
-
-from curl_cffi.requests import Session
-
-from webscout.litagent import LitAgent
-
-
-class YepSearch:
-    """Yep.com search class to get search results."""
-
-    _executor: ThreadPoolExecutor = ThreadPoolExecutor()
-
-    def __init__(
-        self,
-        timeout: int = 20,
-        proxies: Dict[str, str] | None = None,
-        verify: bool = True,
-        impersonate: str = "chrome110"
-    ):
-        """Initialize YepSearch.
-
-        Args:
-            timeout: Timeout value for the HTTP client. Defaults to 20.
-            proxies: Proxy configuration for requests. Defaults to None.
-            verify: Verify SSL certificates. Defaults to True.
-            impersonate: Browser profile to impersonate for curl_cffi. Defaults to "chrome110".
-        """
-        self.base_url = "https://api.yep.com/fs/2/search"
-        self.timeout = timeout
-        # Initialize curl_cffi session
-        self.session = Session(
-            proxies=proxies,
-            verify=verify,
-            impersonate=impersonate,
-            timeout=timeout # Set timeout directly in session
-        )
-        self.session.headers.update({
-            **LitAgent().generate_fingerprint(),
-            "Origin": "https://yep.com",
-            "Referer": "https://yep.com/",
-        })
-
-        # Proxies and verify are handled by the Session constructor now
-
-    def _remove_html_tags(self, text: str) -> str:
-        """Remove HTML tags from text using simple string manipulation.
-
-        Args:
-            text: String containing HTML tags
-
-        Returns:
-            Clean text without HTML tags
-        """
-        result = ""
-        in_tag = False
-
-        for char in text:
-            if char == '<':
-                in_tag = True
-            elif char == '>':
-                in_tag = False
-            elif not in_tag:
-                result += char
-
-        # Replace common HTML entities
-        replacements = {
-            '&nbsp;': ' ',
-            '&amp;': '&',
-            '&lt;': '<',
-            '&gt;': '>',
-            '&quot;': '"',
-            '&apos;': "'",
-        }
-
-        for entity, replacement in replacements.items():
-            result = result.replace(entity, replacement)
-
-        return result.strip()
-
-    def format_results(self, raw_results: dict) -> List[Dict]:
-        """Format raw API results into a consistent structure."""
-        formatted_results = []
-
-        if not raw_results or len(raw_results) < 2:
-            return formatted_results
-
-        results = raw_results[1].get('results', [])
-
-        for result in results:
-            formatted_result = {
-                "title": self._remove_html_tags(result.get("title", "")),
-                "href": result.get("url", ""),
-                "body": self._remove_html_tags(result.get("snippet", "")),
-                "source": result.get("visual_url", ""),
-                "position": len(formatted_results) + 1,
-                "type": result.get("type", "organic"),
-                "first_seen": result.get("first_seen", None)
-            }
-
-            # Add sitelinks if they exist
-            if "sitelinks" in result:
-                sitelinks = []
-                if "full" in result["sitelinks"]:
-                    sitelinks.extend(result["sitelinks"]["full"])
-                if "short" in result["sitelinks"]:
-                    sitelinks.extend(result["sitelinks"]["short"])
-
-                if sitelinks:
-                    formatted_result["sitelinks"] = [
-                        {
-                            "title": self._remove_html_tags(link.get("title", "")),
-                            "href": link.get("url", "")
-                        }
-                        for link in sitelinks
-                    ]
-
-            formatted_results.append(formatted_result)
-
-        return formatted_results
-
-    def text(
-        self,
-        keywords: str,
-        region: str = "all",
-        safesearch: str = "moderate",
-        max_results: Optional[int] = None,
-    ) -> List[Dict[str, str]]:
-        """Yep.com text search.
-
-        Args:
-            keywords: Search query string.
-            region: Region for search results. Defaults to "all".
-            safesearch: SafeSearch setting ("on", "moderate", "off"). Defaults to "moderate".
-            max_results: Maximum number of results to return. Defaults to None.
-
-        Returns:
-            List of dictionaries containing search results.
-        """
-        # Convert safesearch parameter
-        safe_search_map = {
-            "on": "on",
-            "moderate": "moderate",
-            "off": "off"
-        }
-        safe_setting = safe_search_map.get(safesearch.lower(), "moderate")
-
-        params = {
-            "client": "web",
-            "gl": region,
-            "limit": str(max_results) if max_results else "10",
-            "no_correct": "false",
-            "q": keywords,
-            "safeSearch": safe_setting,
-            "type": "web"
-        }
-
-        url = f"{self.base_url}?{urlencode(params)}"
-        try:
-            # Use the session timeout defined in __init__
-            response = self.session.get(url)
-            response.raise_for_status()
-            raw_results = response.json()
-
-            formatted_results = self.format_results(raw_results)
-
-            if max_results:
-                return formatted_results[:max_results]
-            return formatted_results
-        except Exception as e:
-            # Provide more specific error context if possible
-            if hasattr(e, 'response') and e.response is not None:
-                raise Exception(f"Yep search failed with status {e.response.status_code}: {str(e)}")
-            else:
-                raise Exception(f"Yep search failed: {str(e)}")
-
-    def images(
-        self,
-        keywords: str,
-        region: str = "all",
-        safesearch: str = "moderate",
-        max_results: Optional[int] = None,
-    ) -> List[Dict[str, str]]:
-        """Yep.com image search.
-
-        Args:
-            keywords: Search query string.
-            region: Region for search results. Defaults to "all".
-            safesearch: SafeSearch setting ("on", "moderate", "off"). Defaults to "moderate".
-            max_results: Maximum number of results to return. Defaults to None.
-
-        Returns:
-            List of dictionaries containing image search results with keys:
-            - title: Image title
-            - image: Full resolution image URL
-            - thumbnail: Thumbnail image URL
-            - url: Source page URL
-            - height: Image height
-            - width: Image width
-            - source: Source website domain
-        """
-        safe_search_map = {
-            "on": "on",
-            "moderate": "moderate",
-            "off": "off"
-        }
-        safe_setting = safe_search_map.get(safesearch.lower(), "moderate")
-
-        params = {
-            "client": "web",
-            "gl": region,
-            "limit": str(max_results) if max_results else "10",
-            "no_correct": "false",
-            "q": keywords,
-            "safeSearch": safe_setting,
-            "type": "images"
-        }
-
-        url = f"{self.base_url}?{urlencode(params)}"
-        try:
-            # Use the session timeout defined in __init__
-            response = self.session.get(url)
-            response.raise_for_status()
-            raw_results = response.json()
-
-            if not raw_results or len(raw_results) < 2:
-                return []
-
-            formatted_results = []
-            results = raw_results[1].get('results', [])
-
-            for result in results:
-                if result.get("type") != "Image":
-                    continue
-
-                formatted_result = {
-                    "title": self._remove_html_tags(result.get("title", "")),
-                    "image": result.get("image_id", ""),
-                    "thumbnail": result.get("src", ""),
-                    "url": result.get("host_page", ""),
-                    "height": result.get("height", 0),
-                    "width": result.get("width", 0),
-                    "source": result.get("visual_url", "")
-                }
-
-                # Add high-res thumbnail if available
-                if "srcset" in result:
-                    formatted_result["thumbnail_hd"] = result["srcset"].split(",")[1].strip().split(" ")[0]
-
-                formatted_results.append(formatted_result)
-
-            if max_results:
-                return formatted_results[:max_results]
-            return formatted_results
-
-        except Exception as e:
-            # Provide more specific error context if possible
-            if hasattr(e, 'response') and e.response is not None:
-                raise Exception(f"Yep image search failed with status {e.response.status_code}: {str(e)}")
-            else:
-                raise Exception(f"Yep image search failed: {str(e)}")
-
-    def suggestions(
-        self,
-        query: str,
-        region: str = "all",
-    ) -> List[str]:
-        """Get search suggestions from Yep.com autocomplete API.
-
-        Args:
-            query: Search query string to get suggestions for.
-            region: Region for suggestions. Defaults to "all".
-
-        Returns:
-            List of suggestion strings.
-
-        Example:
-            >>> yep = YepSearch()
-            >>> suggestions = yep.suggestions("ca")
-            >>> print(suggestions)
-            ['capital one', 'car wash', 'carmax', 'cafe', ...]
-        """
-        params = {
-            "query": query,
-            "type": "web",
-            "gl": region
-        }
-
-        url = f"https://api.yep.com/ac/?{urlencode(params)}"
-
-        try:
-            # Use the session timeout defined in __init__
-            response = self.session.get(url)
-            response.raise_for_status()
-            data = response.json()
-            # Return suggestions list if response format is valid
-            if isinstance(data, list) and len(data) > 1 and isinstance(data[1], list):
-                return data[1]
-            return []
-
-        except Exception as e:
-            # Provide more specific error context if possible
-            if hasattr(e, 'response') and e.response is not None:
-                raise Exception(f"Yep suggestions failed with status {e.response.status_code}: {str(e)}")
-            else:
-                raise Exception(f"Yep suggestions failed: {str(e)}")
-
-
-if __name__ == "__main__":
-    from rich import print
-    yep = YepSearch(
-        timeout=20, # Optional: Set custom timeout
-        proxies=None, # Optional: Use proxies
-        verify=True # Optional: SSL verification
-    )
-
-    # Text Search
-    text_results = yep.text(
-        keywords="artificial intelligence",
-        region="all", # Optional: Region for results
-        safesearch="moderate", # Optional: "on", "moderate", "off"
-        max_results=10 # Optional: Limit number of results
-    )
-
-    # Image Search
-    image_results = yep.images(
-        keywords="nature photography",
-        region="all",
-        safesearch="moderate",
-        max_results=10
-    )
-
-    # Get search suggestions
-    suggestions = yep.suggestions("hist")
-    print(text_results)
-    print("---" * 30)
-    print(image_results)
-    print("---" * 30)
-    print(suggestions)