webscout 2025.10.11__py3-none-any.whl → 2025.10.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Provider/Andi.py +1 -1
- webscout/Provider/ChatGPTClone.py +2 -1
- webscout/__init__.py +1 -4
- webscout/auth/routes.py +2 -3
- webscout/cli.py +4 -2
- webscout/search/__init__.py +51 -0
- webscout/search/base.py +195 -0
- webscout/search/duckduckgo_main.py +54 -0
- webscout/search/engines/__init__.py +48 -0
- webscout/search/engines/bing.py +84 -0
- webscout/search/engines/bing_news.py +52 -0
- webscout/search/engines/brave.py +43 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +78 -0
- webscout/search/engines/duckduckgo/base.py +187 -0
- webscout/search/engines/duckduckgo/images.py +97 -0
- webscout/search/engines/duckduckgo/maps.py +168 -0
- webscout/search/engines/duckduckgo/news.py +68 -0
- webscout/search/engines/duckduckgo/suggestions.py +21 -0
- webscout/search/engines/duckduckgo/text.py +211 -0
- webscout/search/engines/duckduckgo/translate.py +47 -0
- webscout/search/engines/duckduckgo/videos.py +63 -0
- webscout/search/engines/duckduckgo/weather.py +74 -0
- webscout/search/engines/mojeek.py +37 -0
- webscout/search/engines/wikipedia.py +56 -0
- webscout/search/engines/yahoo.py +65 -0
- webscout/search/engines/yahoo_news.py +64 -0
- webscout/search/engines/yandex.py +43 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +32 -0
- webscout/search/engines/yep/images.py +99 -0
- webscout/search/engines/yep/suggestions.py +35 -0
- webscout/search/engines/yep/text.py +114 -0
- webscout/search/http_client.py +156 -0
- webscout/search/results.py +137 -0
- webscout/search/yep_main.py +44 -0
- webscout/version.py +1 -1
- webscout/version.py.bak +2 -0
- {webscout-2025.10.11.dist-info → webscout-2025.10.14.1.dist-info}/METADATA +3 -4
- {webscout-2025.10.11.dist-info → webscout-2025.10.14.1.dist-info}/RECORD +44 -15
- webscout/webscout_search.py +0 -1183
- webscout/webscout_search_async.py +0 -649
- webscout/yep_search.py +0 -346
- {webscout-2025.10.11.dist-info → webscout-2025.10.14.1.dist-info}/WHEEL +0 -0
- {webscout-2025.10.11.dist-info → webscout-2025.10.14.1.dist-info}/entry_points.txt +0 -0
- {webscout-2025.10.11.dist-info → webscout-2025.10.14.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-2025.10.11.dist-info → webscout-2025.10.14.1.dist-info}/top_level.txt +0 -0
webscout/Provider/Andi.py
CHANGED

@@ -6,7 +6,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from
+from ..search import DuckDuckGoSearch
 from webscout.litagent import LitAgent

 class AndiSearch(Provider):

webscout/Provider/ChatGPTClone.py
CHANGED

@@ -12,7 +12,8 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
-from
+from ..search import DuckDuckGoSearch
+from .. import exceptions
 # from webscout.litagent import LitAgent

 class ChatGPTClone(Provider):
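Both providers replace their import of the removed search module with a relative import of the new webscout.search package. The two spellings below resolve to the same class; the relative form is what the diffs above use:

    # Inside webscout/Provider/*.py:
    from webscout.search import DuckDuckGoSearch   # absolute form
    from ..search import DuckDuckGoSearch          # relative form used in the diffs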
webscout/__init__.py
CHANGED

@@ -1,9 +1,7 @@
 # webscout/__init__.py

-from .
-from .webscout_search_async import AsyncWEBS
+from .search import DuckDuckGoSearch, YepSearch
 from .version import __version__
-from .DWEBS import *
 from .Provider import *
 from .Provider.TTI import *
 from .Provider.TTS import *
@@ -15,7 +13,6 @@ from .swiftcli import *
 from .litagent import LitAgent
 from .scout import *
 from .zeroart import *
-from .yep_search import *
 from .AIutel import *

 useragent = LitAgent()
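The removed lines drop the old WEBS and AsyncWEBS exports (their backing modules webscout_search.py, webscout_search_async.py, and yep_search.py are deleted in this release, per the file list above), with DuckDuckGoSearch and YepSearch exported as the top-level replacements. A minimal migration sketch, assuming the new class keeps the old keyword-style call pattern shown in duckduckgo_main.py below:

    # Before (2025.10.11):
    #   from webscout import WEBS
    #   results = WEBS().text("python")

    # After (2025.10.14.1):
    from webscout import DuckDuckGoSearch

    results = DuckDuckGoSearch().text("python", max_results=5)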
webscout/auth/routes.py
CHANGED

@@ -42,9 +42,8 @@ from .request_processing import (
 )
 from .auth_system import get_auth_components
 from .simple_logger import request_logger
-from
-from
-from webscout.webscout_search import WEBS
+from ..search import DuckDuckGoSearch, YepSearch
+from ..DWEBS import GoogleSearch
 from webscout.Bing_search import BingSearch

 # Setup logger
webscout/cli.py
CHANGED

@@ -1,10 +1,12 @@
 import sys
 from .swiftcli import CLI, option
-from .
+from .search import DuckDuckGoSearch, YepSearch  # Import search classes
 from .DWEBS import GoogleSearch  # Import GoogleSearch from DWEBS
-from .yep_search import YepSearch  # Import YepSearch from yep_search
 from .version import __version__

+# Alias for backward compatibility
+WEBS = DuckDuckGoSearch
+

 def _print_data(data):
     """Prints data in a simple formatted way."""
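The CLI keeps backward compatibility by rebinding the name rather than subclassing: WEBS is a plain alias for DuckDuckGoSearch, so code that referenced webscout.cli.WEBS still resolves. A quick check of what the alias means:

    from webscout.cli import WEBS
    from webscout.search import DuckDuckGoSearch

    assert WEBS is DuckDuckGoSearch  # same class object, not a wrapper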
webscout/search/__init__.py
ADDED

@@ -0,0 +1,51 @@
+"""Webscout search module - unified search interfaces."""
+
+from .base import BaseSearch, BaseSearchEngine
+from .duckduckgo_main import DuckDuckGoSearch
+from .yep_main import YepSearch
+
+# Import new search engines
+from .engines.bing import Bing
+from .engines.brave import Brave
+from .engines.mojeek import Mojeek
+from .engines.yahoo import Yahoo
+from .engines.yandex import Yandex
+from .engines.wikipedia import Wikipedia
+from .engines.bing_news import BingNews
+from .engines.yahoo_news import YahooNews
+
+# Import result models
+from .results import (
+    TextResult,
+    ImagesResult,
+    VideosResult,
+    NewsResult,
+    BooksResult,
+)
+
+__all__ = [
+    # Base classes
+    "BaseSearch",
+    "BaseSearchEngine",
+
+    # Main search interfaces
+    "DuckDuckGoSearch",
+    "YepSearch",
+
+    # Individual engines
+    "Bing",
+    "Brave",
+    "Mojeek",
+    "Yahoo",
+    "Yandex",
+    "Wikipedia",
+    "BingNews",
+    "YahooNews",
+
+    # Result models
+    "TextResult",
+    "ImagesResult",
+    "VideosResult",
+    "NewsResult",
+    "BooksResult",
+]
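The package exports three layers: the unified interfaces (DuckDuckGoSearch, YepSearch), the individual engine classes, and the result models they populate. Either level can be imported directly; a sketch using the search() entry point defined in base.py below (this issues a live HTTP request):

    from webscout.search import Brave, DuckDuckGoSearch

    # Unified interface, one method per category:
    ddg = DuckDuckGoSearch()

    # Or a single engine driven through the generic search() pipeline:
    results = Brave().search("open source", region="us-en", safesearch="moderate")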
webscout/search/base.py
ADDED

@@ -0,0 +1,195 @@
+"""Base class for search engines."""
+
+from __future__ import annotations
+
+import logging
+from abc import ABC, abstractmethod
+from collections.abc import Mapping
+from functools import cached_property
+from typing import Any, Generic, Literal, TypeVar
+
+try:
+    from lxml import html
+    from lxml.etree import HTMLParser as LHTMLParser
+    LXML_AVAILABLE = True
+except ImportError:
+    LXML_AVAILABLE = False
+    html = None  # type: ignore
+    LHTMLParser = None  # type: ignore
+
+from .http_client import HttpClient
+from .results import BooksResult, ImagesResult, NewsResult, TextResult, VideosResult
+
+logger = logging.getLogger(__name__)
+T = TypeVar("T")
+
+
+class BaseSearchEngine(ABC, Generic[T]):
+    """Abstract base class for all search engine backends."""
+
+    name: str  # unique key, e.g. "google"
+    category: Literal["text", "images", "videos", "news", "books"]
+    provider: str  # source of the search results (e.g. "google", "bing", etc.)
+    disabled: bool = False  # if True, the engine is disabled
+    priority: float = 1
+
+    search_url: str
+    search_method: str  # GET or POST
+    search_headers: Mapping[str, str] = {}
+    items_xpath: str = ""
+    elements_xpath: Mapping[str, str] = {}
+    elements_replace: Mapping[str, str] = {}
+
+    def __init__(self, proxy: str | None = None, timeout: int | None = None, verify: bool = True):
+        """Initialize search engine.
+
+        Args:
+            proxy: Proxy URL (supports http/https/socks5).
+            timeout: Request timeout in seconds.
+            verify: Whether to verify SSL certificates.
+        """
+        self.http_client = HttpClient(proxy=proxy, timeout=timeout, verify=verify)
+        self.results: list[T] = []
+
+    @property
+    def result_type(self) -> type[T]:
+        """Get result type based on category."""
+        categories = {
+            "text": TextResult,
+            "images": ImagesResult,
+            "videos": VideosResult,
+            "news": NewsResult,
+            "books": BooksResult,
+        }
+        return categories[self.category]  # type: ignore
+
+    @abstractmethod
+    def build_payload(
+        self, query: str, region: str, safesearch: str, timelimit: str | None, page: int, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build a payload for the search request."""
+        raise NotImplementedError
+
+    def request(self, method: str, url: str, **kwargs: Any) -> str | None:
+        """Make a request to the search engine."""
+        try:
+            response = self.http_client.request(method, url, **kwargs)  # type: ignore
+            return response.text
+        except Exception as ex:
+            logger.error("Error in %s request: %r", self.name, ex)
+            return None
+
+    @cached_property
+    def parser(self) -> Any:
+        """Get HTML parser."""
+        if not LXML_AVAILABLE:
+            logger.warning("lxml not available, HTML parsing disabled")
+            return None
+        return LHTMLParser(remove_blank_text=True, remove_comments=True, remove_pis=True, collect_ids=False)
+
+    def extract_tree(self, html_text: str) -> Any:
+        """Extract html tree from html text."""
+        if not LXML_AVAILABLE or not self.parser:
+            raise ImportError("lxml is required for HTML parsing")
+        return html.fromstring(html_text, parser=self.parser)
+
+    def pre_process_html(self, html_text: str) -> str:
+        """Pre-process html_text before extracting results."""
+        return html_text
+
+    def extract_results(self, html_text: str) -> list[T]:
+        """Extract search results from html text."""
+        if not LXML_AVAILABLE:
+            raise ImportError("lxml is required for result extraction")
+
+        html_text = self.pre_process_html(html_text)
+        tree = self.extract_tree(html_text)
+
+        results = []
+        items = tree.xpath(self.items_xpath) if self.items_xpath else []
+
+        for item in items:
+            result = self.result_type()
+            for key, xpath in self.elements_xpath.items():
+                try:
+                    data = item.xpath(xpath)
+                    if data:
+                        # Join text nodes or get first attribute
+                        value = "".join(data) if isinstance(data, list) else data
+                        setattr(result, key, value.strip() if isinstance(value, str) else value)
+                except Exception as ex:
+                    logger.debug("Error extracting %s: %r", key, ex)
+            results.append(result)
+
+        return results
+
+    def post_extract_results(self, results: list[T]) -> list[T]:
+        """Post-process search results."""
+        return results
+
+    def search(
+        self,
+        query: str,
+        region: str = "us-en",
+        safesearch: str = "moderate",
+        timelimit: str | None = None,
+        page: int = 1,
+        **kwargs: Any,
+    ) -> list[T] | None:
+        """Search the engine."""
+        payload = self.build_payload(
+            query=query, region=region, safesearch=safesearch, timelimit=timelimit, page=page, **kwargs
+        )
+        if self.search_method == "GET":
+            html_text = self.request(self.search_method, self.search_url, params=payload, headers=self.search_headers)
+        else:
+            html_text = self.request(self.search_method, self.search_url, data=payload, headers=self.search_headers)
+        if not html_text:
+            return None
+        results = self.extract_results(html_text)
+        return self.post_extract_results(results)
+
+
+# Legacy base class for backwards compatibility
+class BaseSearch(ABC):
+    """Base class for synchronous search engines (legacy)."""
+
+    @abstractmethod
+    def text(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Text search."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def images(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Images search."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def videos(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Videos search."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def news(self, *args, **kwargs) -> list[dict[str, str]]:
+        """News search."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def answers(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Instant answers."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def suggestions(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Suggestions."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def maps(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Maps search."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def translate(self, *args, **kwargs) -> list[dict[str, str]]:
+        """Translate."""
+        raise NotImplementedError
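BaseSearchEngine is a template-method base: a subclass declares its endpoint and XPath selectors as class attributes, implements build_payload(), and inherits the full request/parse/post-process pipeline from search(). A hypothetical minimal engine; the name, URL, and selectors here are illustrative, not part of the release:

    from __future__ import annotations

    from typing import Any

    from webscout.search.base import BaseSearchEngine
    from webscout.search.results import TextResult


    class ExampleEngine(BaseSearchEngine[TextResult]):
        """Hypothetical engine showing the minimum a subclass supplies."""

        name = "example"        # illustrative key
        category = "text"
        provider = "example"

        search_url = "https://search.example.com/search"  # illustrative URL
        search_method = "GET"

        items_xpath = "//div[@class='result']"
        elements_xpath = {
            "title": ".//h3//text()",
            "href": ".//a/@href",
            "body": ".//p//text()",
        }

        def build_payload(
            self, query: str, region: str, safesearch: str, timelimit: str | None, page: int = 1, **kwargs: Any
        ) -> dict[str, Any]:
            # Real engines also encode locale, paging, and time filters here.
            return {"q": query, "page": page}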
webscout/search/duckduckgo_main.py
ADDED

@@ -0,0 +1,54 @@
+"""DuckDuckGo unified search interface."""
+
+from __future__ import annotations
+from typing import Dict, List, Optional
+from .base import BaseSearch
+from .engines.duckduckgo.text import DuckDuckGoTextSearch
+from .engines.duckduckgo.images import DuckDuckGoImages
+from .engines.duckduckgo.videos import DuckDuckGoVideos
+from .engines.duckduckgo.news import DuckDuckGoNews
+from .engines.duckduckgo.answers import DuckDuckGoAnswers
+from .engines.duckduckgo.suggestions import DuckDuckGoSuggestions
+from .engines.duckduckgo.maps import DuckDuckGoMaps
+from .engines.duckduckgo.translate import DuckDuckGoTranslate
+from .engines.duckduckgo.weather import DuckDuckGoWeather
+
+
+class DuckDuckGoSearch(BaseSearch):
+    """Unified DuckDuckGo search interface."""
+
+    def text(self, keywords: str, region: str = "wt-wt", safesearch: str = "moderate", timelimit: Optional[str] = None, backend: str = "api", max_results: Optional[int] = None) -> List[Dict[str, str]]:
+        search = DuckDuckGoTextSearch()
+        return search.run(keywords, region, safesearch, timelimit, backend, max_results)
+
+    def images(self, keywords: str, region: str = "wt-wt", safesearch: str = "moderate", timelimit: Optional[str] = None, size: Optional[str] = None, color: Optional[str] = None, type_image: Optional[str] = None, layout: Optional[str] = None, license_image: Optional[str] = None, max_results: Optional[int] = None) -> List[Dict[str, str]]:
+        search = DuckDuckGoImages()
+        return search.run(keywords, region, safesearch, timelimit, size, color, type_image, layout, license_image, max_results)
+
+    def videos(self, keywords: str, region: str = "wt-wt", safesearch: str = "moderate", timelimit: Optional[str] = None, resolution: Optional[str] = None, duration: Optional[str] = None, license_videos: Optional[str] = None, max_results: Optional[int] = None) -> List[Dict[str, str]]:
+        search = DuckDuckGoVideos()
+        return search.run(keywords, region, safesearch, timelimit, resolution, duration, license_videos, max_results)
+
+    def news(self, keywords: str, region: str = "wt-wt", safesearch: str = "moderate", timelimit: Optional[str] = None, max_results: Optional[int] = None) -> List[Dict[str, str]]:
+        search = DuckDuckGoNews()
+        return search.run(keywords, region, safesearch, timelimit, max_results)
+
+    def answers(self, keywords: str) -> List[Dict[str, str]]:
+        search = DuckDuckGoAnswers()
+        return search.run(keywords)
+
+    def suggestions(self, keywords: str, region: str = "wt-wt") -> List[str]:
+        search = DuckDuckGoSuggestions()
+        return search.run(keywords, region)
+
+    def maps(self, keywords: str, place: Optional[str] = None, street: Optional[str] = None, city: Optional[str] = None, county: Optional[str] = None, state: Optional[str] = None, country: Optional[str] = None, postalcode: Optional[str] = None, latitude: Optional[str] = None, longitude: Optional[str] = None, radius: int = 0, max_results: Optional[int] = None) -> List[Dict[str, str]]:
+        search = DuckDuckGoMaps()
+        return search.run(keywords, place, street, city, county, state, country, postalcode, latitude, longitude, radius, max_results)
+
+    def translate(self, keywords: str, from_lang: Optional[str] = None, to_lang: str = "en") -> List[Dict[str, str]]:
+        search = DuckDuckGoTranslate()
+        return search.run(keywords, from_lang, to_lang)
+
+    def weather(self, keywords: str) -> List[Dict[str, str]]:
+        search = DuckDuckGoWeather()
+        return search.run(keywords)
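Each method is a thin facade: it instantiates the matching per-category engine and forwards its arguments positionally to that engine's run(). Usage mirrors the old WEBS API; a sketch following the signatures above:

    from webscout.search import DuckDuckGoSearch

    ddg = DuckDuckGoSearch()
    print(ddg.text("python", max_results=3))
    print(ddg.suggestions("pyth"))
    print(ddg.translate("hello", to_lang="de"))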
webscout/search/engines/__init__.py
ADDED

@@ -0,0 +1,48 @@
+"""__init__.py for engines package - auto-discovers and registers search engines."""
+
+from __future__ import annotations
+
+import importlib
+import inspect
+import logging
+import pkgutil
+from collections import defaultdict
+from typing import Any
+
+from ..base import BaseSearchEngine
+
+logger = logging.getLogger(__name__)
+
+# ENGINES[category][name] = class
+ENGINES: dict[str, dict[str, type[BaseSearchEngine[Any]]]] = defaultdict(dict)
+
+package_name = __name__
+package = importlib.import_module(package_name)
+
+# Auto-discover all search engine classes
+for finder, modname, _ispkg in pkgutil.iter_modules(package.__path__, package_name + "."):
+    try:
+        module = importlib.import_module(modname)
+        for _, cls in inspect.getmembers(module, inspect.isclass):
+            # Must subclass BaseSearchEngine (but not the base itself)
+            if not issubclass(cls, BaseSearchEngine) or cls is BaseSearchEngine:
+                continue
+
+            # Skip any class whose name starts with "Base"
+            if cls.__name__.startswith("Base"):
+                continue
+
+            # Skip disabled engines
+            if getattr(cls, "disabled", False):
+                logger.info("Skipping disabled engine: %s", cls.name)
+                continue
+
+            # Register the engine
+            if hasattr(cls, "name") and hasattr(cls, "category"):
+                ENGINES[cls.category][cls.name] = cls
+                logger.debug("Registered engine: %s (%s)", cls.name, cls.category)
+    except Exception as ex:
+        logger.warning("Failed to import module %s: %r", modname, ex)
+
+
+__all__ = ["ENGINES"]
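Because the registry is filled as a side effect of importing the package, engines can be looked up by category and key at runtime instead of being imported individually. A sketch; the listed keys are what the files in this release suggest, not a verified output:

    from webscout.search.engines import ENGINES

    print(sorted(ENGINES["text"]))  # expected to include 'bing', 'brave', 'mojeek', 'yahoo', 'yandex', ...
    engine_cls = ENGINES["text"]["brave"]
    results = engine_cls().search("webscout")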
webscout/search/engines/bing.py
ADDED

@@ -0,0 +1,84 @@
+"""Bing search engine implementation."""
+
+from __future__ import annotations
+
+import base64
+from collections.abc import Mapping
+from time import time
+from typing import Any
+from urllib.parse import parse_qs, urlparse
+
+from ..base import BaseSearchEngine
+from ..results import TextResult
+
+
+def unwrap_bing_url(raw_url: str) -> str | None:
+    """Decode the Bing-wrapped raw_url to extract the original url."""
+    try:
+        parsed = urlparse(raw_url)
+        if parsed.netloc == "www.bing.com" and parsed.path == "/ck/a":
+            query_params = parse_qs(parsed.query)
+            if "u" in query_params:
+                encoded_url = query_params["u"][0]
+                # Decode the base64-like encoding
+                if encoded_url.startswith("a1"):
+                    encoded_url = encoded_url[2:]
+                # Add padding if needed
+                padding = len(encoded_url) % 4
+                if padding:
+                    encoded_url += "=" * (4 - padding)
+                try:
+                    decoded = base64.urlsafe_b64decode(encoded_url).decode()
+                    return decoded
+                except Exception:
+                    pass
+        return raw_url
+    except Exception:
+        return raw_url
+
+
+class Bing(BaseSearchEngine[TextResult]):
+    """Bing search engine."""
+
+    name = "bing"
+    category = "text"
+    provider = "bing"
+
+    search_url = "https://www.bing.com/search"
+    search_method = "GET"
+
+    items_xpath = "//li[contains(@class, 'b_algo')]"
+    elements_xpath: Mapping[str, str] = {
+        "title": ".//h2/a//text()",
+        "href": ".//h2/a/@href",
+        "body": ".//p//text()",
+    }
+
+    def build_payload(
+        self, query: str, region: str, safesearch: str, timelimit: str | None, page: int = 1, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build a payload for the Bing search request."""
+        country, lang = region.lower().split("-")
+        payload = {"q": query, "pq": query, "cc": lang}
+        cookies = {
+            "_EDGE_CD": f"m={lang}-{country}&u={lang}-{country}",
+            "_EDGE_S": f"mkt={lang}-{country}&ui={lang}-{country}",
+        }
+        self.http_client.set_cookies("https://www.bing.com", cookies)
+        if timelimit:
+            d = int(time() // 86400)
+            payload["filters"] = {
+                "d": f"ex1:\"ez1_{d - 1}_{d}\"",
+                "w": f"ex1:\"ez1_{d - 7}_{d}\"",
+                "m": f"ex1:\"ez1_{d - 30}_{d}\"",
+                "y": f"ex1:\"ez1_{d - 365}_{d}\"",
+            }[timelimit]
+        if page > 1:
+            payload["first"] = f"{(page - 1) * 10 + 1}"
+        return payload
+
+    def post_extract_results(self, results: list[TextResult]) -> list[TextResult]:
+        """Post-process search results."""
+        for result in results:
+            result.href = unwrap_bing_url(result.href)
+        return results
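unwrap_bing_url strips Bing's /ck/a click-tracking redirect: it pulls the u query parameter, drops the leading "a1" marker, restores the stripped base64 padding, and decodes; on any failure it returns the wrapped URL unchanged. A round-trip sketch with a hand-built wrapped URL:

    import base64

    from webscout.search.engines.bing import unwrap_bing_url

    target = "https://example.com/"
    token = "a1" + base64.urlsafe_b64encode(target.encode()).decode().rstrip("=")
    wrapped = f"https://www.bing.com/ck/a?u={token}&ntb=1"

    assert unwrap_bing_url(wrapped) == target
    assert unwrap_bing_url("https://example.org/") == "https://example.org/"  # non-Bing URLs pass through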
webscout/search/engines/bing_news.py
ADDED

@@ -0,0 +1,52 @@
+"""Bing news search engine implementation."""
+
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import Any
+
+from ..base import BaseSearchEngine
+from ..results import NewsResult
+
+
+class BingNews(BaseSearchEngine[NewsResult]):
+    """Bing news engine."""
+
+    name = "bing"
+    category = "news"
+    provider = "bing"
+
+    search_url = "https://www.bing.com/news/infinitescrollajax"
+    search_method = "GET"
+
+    items_xpath = "//div[contains(@class, 'newsitem')]"
+    elements_xpath: Mapping[str, str] = {
+        "date": ".//span[@aria-label]//@aria-label",
+        "title": "@data-title",
+        "body": ".//div[@class='snippet']//text()",
+        "url": "@url",
+        "image": ".//a[contains(@class, 'image')]//@src",
+        "source": "@data-author",
+    }
+
+    def build_payload(
+        self, query: str, region: str, safesearch: str, timelimit: str | None, page: int = 1, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build a payload for the Bing search request."""
+        country, lang = region.lower().split("-")
+        payload = {
+            "q": query,
+            "InfiniteScroll": "1",
+            "first": f"{page * 10 + 1}",
+            "SFX": f"{page}",
+            "cc": country,
+            "setlang": lang,
+        }
+        if timelimit:
+            payload["qft"] = {
+                "d": 'interval="4"',  # doesn't exist so it's the same as one hour
+                "w": 'interval="7"',
+                "m": 'interval="9"',
+                "y": 'interval="9"',  # doesn't exist so it's the same as month
+            }[timelimit]
+        return payload
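BingNews queries Bing's infinite-scroll AJAX endpoint rather than the regular news page, mapping the d/w/m/y timelimit codes onto Bing's qft=interval values; the d and y intervals are approximations, as the inline comments concede. A payload sketch, computed purely locally by build_payload():

    from webscout.search.engines.bing_news import BingNews

    payload = BingNews().build_payload("open source", region="us-en", safesearch="moderate", timelimit="w", page=1)
    # -> {'q': 'open source', 'InfiniteScroll': '1', 'first': '11',
    #     'SFX': '1', 'cc': 'us', 'setlang': 'en', 'qft': 'interval="7"'}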
webscout/search/engines/brave.py
ADDED

@@ -0,0 +1,43 @@
+"""Brave search engine implementation."""
+
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import Any
+
+from ..base import BaseSearchEngine
+from ..results import TextResult
+
+
+class Brave(BaseSearchEngine[TextResult]):
+    """Brave search engine."""
+
+    name = "brave"
+    category = "text"
+    provider = "brave"
+
+    search_url = "https://search.brave.com/search"
+    search_method = "GET"
+
+    items_xpath = "//div[@data-type='web']"
+    elements_xpath: Mapping[str, str] = {
+        "title": ".//div[(contains(@class,'title') or contains(@class,'sitename-container')) and position()=last()]//text()",
+        "href": "./a/@href",
+        "body": ".//div[contains(@class, 'description')]//text()",
+    }
+
+    def build_payload(
+        self, query: str, region: str, safesearch: str, timelimit: str | None, page: int = 1, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build a payload for the search request."""
+        safesearch_base = {"on": "strict", "moderate": "moderate", "off": "off"}
+        payload = {
+            "q": query,
+            "source": "web",
+            "safesearch": safesearch_base[safesearch.lower()],
+        }
+        if timelimit:
+            payload["tf"] = timelimit
+        if page > 1:
+            payload["offset"] = f"{(page - 1) * 10}"
+        return payload
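Brave's payload is the simplest of the new engines: safesearch levels map onto Brave's own names ("on" becomes "strict") and pagination is a 10-result offset. A local sketch:

    from webscout.search.engines.brave import Brave

    payload = Brave().build_payload("privacy", region="us-en", safesearch="on", timelimit=None, page=3)
    # -> {'q': 'privacy', 'source': 'web', 'safesearch': 'strict', 'offset': '20'}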
webscout/search/engines/duckduckgo/__init__.py
ADDED

@@ -0,0 +1,25 @@
+"""DuckDuckGo search engines package."""
+
+from .answers import DuckDuckGoAnswers
+from .base import DuckDuckGoBase
+from .images import DuckDuckGoImages
+from .maps import DuckDuckGoMaps
+from .news import DuckDuckGoNews
+from .suggestions import DuckDuckGoSuggestions
+from .text import DuckDuckGoTextSearch
+from .translate import DuckDuckGoTranslate
+from .videos import DuckDuckGoVideos
+from .weather import DuckDuckGoWeather
+
+__all__ = [
+    "DuckDuckGoBase",
+    "DuckDuckGoTextSearch",
+    "DuckDuckGoImages",
+    "DuckDuckGoVideos",
+    "DuckDuckGoNews",
+    "DuckDuckGoAnswers",
+    "DuckDuckGoSuggestions",
+    "DuckDuckGoMaps",
+    "DuckDuckGoTranslate",
+    "DuckDuckGoWeather",
+]