webscout 8.3.7__py3-none-any.whl → 2025.10.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Andi.py +1 -1
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +237 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +27 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +663 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/search/__init__.py +51 -0
- webscout/search/base.py +195 -0
- webscout/search/duckduckgo_main.py +54 -0
- webscout/search/engines/__init__.py +48 -0
- webscout/search/engines/bing.py +84 -0
- webscout/search/engines/bing_news.py +52 -0
- webscout/search/engines/brave.py +43 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +78 -0
- webscout/search/engines/duckduckgo/base.py +187 -0
- webscout/search/engines/duckduckgo/images.py +97 -0
- webscout/search/engines/duckduckgo/maps.py +168 -0
- webscout/search/engines/duckduckgo/news.py +68 -0
- webscout/search/engines/duckduckgo/suggestions.py +21 -0
- webscout/search/engines/duckduckgo/text.py +211 -0
- webscout/search/engines/duckduckgo/translate.py +47 -0
- webscout/search/engines/duckduckgo/videos.py +63 -0
- webscout/search/engines/duckduckgo/weather.py +74 -0
- webscout/search/engines/mojeek.py +37 -0
- webscout/search/engines/wikipedia.py +56 -0
- webscout/search/engines/yahoo.py +65 -0
- webscout/search/engines/yahoo_news.py +64 -0
- webscout/search/engines/yandex.py +43 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +32 -0
- webscout/search/engines/yep/images.py +99 -0
- webscout/search/engines/yep/suggestions.py +35 -0
- webscout/search/engines/yep/text.py +114 -0
- webscout/search/http_client.py +156 -0
- webscout/search/results.py +137 -0
- webscout/search/yep_main.py +44 -0
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/version.py.bak +2 -0
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/METADATA +936 -937
- webscout-2025.10.13.dist-info/RECORD +329 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout/webscout_search.py +0 -1183
- webscout/webscout_search_async.py +0 -649
- webscout/yep_search.py +0 -346
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0
|
@@ -1,178 +1,178 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Scout LXML Parser - Advanced HTML/XML Parsing with lxml
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
import re
|
|
6
|
-
from typing import List, Optional, Dict, Any, Union
|
|
7
|
-
|
|
8
|
-
from lxml import etree, html as lxml_html
|
|
9
|
-
from ..element import Tag, NavigableString
|
|
10
|
-
|
|
11
|
-
class LXMLParser:
|
|
12
|
-
"""
|
|
13
|
-
Advanced HTML/XML parser using lxml library.
|
|
14
|
-
Provides robust parsing with enhanced error handling and flexibility.
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
def __init__(self, parser_type: str = 'html'):
|
|
18
|
-
"""
|
|
19
|
-
Initialize the LXML parser with advanced parsing capabilities.
|
|
20
|
-
|
|
21
|
-
Args:
|
|
22
|
-
parser_type (str): Type of parser ('html' or 'xml')
|
|
23
|
-
"""
|
|
24
|
-
self._parser_type = parser_type
|
|
25
|
-
self._parsing_errors = []
|
|
26
|
-
|
|
27
|
-
# Select appropriate parser based on type
|
|
28
|
-
if parser_type == 'html':
|
|
29
|
-
self._parser = lxml_html.HTMLParser(recover=True, encoding='utf-8')
|
|
30
|
-
elif parser_type == 'xml':
|
|
31
|
-
self._parser = etree.XMLParser(recover=True, encoding='utf-8')
|
|
32
|
-
else:
|
|
33
|
-
raise ValueError(f"Unsupported parser type: {parser_type}")
|
|
34
|
-
|
|
35
|
-
def parse(self, markup: str) -> Tag:
|
|
36
|
-
"""
|
|
37
|
-
Parse HTML/XML markup and return the root tag.
|
|
38
|
-
|
|
39
|
-
Args:
|
|
40
|
-
markup (str): HTML/XML content to parse
|
|
41
|
-
|
|
42
|
-
Returns:
|
|
43
|
-
Tag: Parsed document root
|
|
44
|
-
"""
|
|
45
|
-
try:
|
|
46
|
-
# Preprocess markup to handle common issues
|
|
47
|
-
markup = self._preprocess_markup(markup)
|
|
48
|
-
|
|
49
|
-
# Parse the markup
|
|
50
|
-
if self._parser_type == 'html':
|
|
51
|
-
tree = lxml_html.fromstring(markup, parser=self._parser)
|
|
52
|
-
else:
|
|
53
|
-
tree = etree.fromstring(markup, parser=self._parser)
|
|
54
|
-
|
|
55
|
-
# Convert lxml element to Scout Tag
|
|
56
|
-
return self._convert_element(tree)
|
|
57
|
-
|
|
58
|
-
except Exception as e:
|
|
59
|
-
self._parsing_errors.append(str(e))
|
|
60
|
-
return Tag('root')
|
|
61
|
-
|
|
62
|
-
def _preprocess_markup(self, markup: str) -> str:
|
|
63
|
-
"""
|
|
64
|
-
Preprocess markup to handle common parsing issues.
|
|
65
|
-
|
|
66
|
-
Args:
|
|
67
|
-
markup (str): Raw markup
|
|
68
|
-
|
|
69
|
-
Returns:
|
|
70
|
-
str: Preprocessed markup
|
|
71
|
-
"""
|
|
72
|
-
# Remove XML/HTML comments
|
|
73
|
-
markup = re.sub(r'<!--.*?-->', '', markup, flags=re.DOTALL)
|
|
74
|
-
|
|
75
|
-
# Handle unclosed tags for HTML
|
|
76
|
-
if self._parser_type == 'html':
|
|
77
|
-
markup = re.sub(r'<(br|img|input|hr|meta)([^>]*?)(?<!/)>', r'<\1\2 />', markup, flags=re.IGNORECASE)
|
|
78
|
-
|
|
79
|
-
return markup
|
|
80
|
-
|
|
81
|
-
def _convert_element(self, element: Union[etree._Element, lxml_html.HtmlElement]) -> Tag:
|
|
82
|
-
"""
|
|
83
|
-
Convert lxml element to Scout Tag.
|
|
84
|
-
|
|
85
|
-
Args:
|
|
86
|
-
element (etree._Element or lxml_html.HtmlElement): lxml element
|
|
87
|
-
|
|
88
|
-
Returns:
|
|
89
|
-
Tag: Converted Scout Tag
|
|
90
|
-
"""
|
|
91
|
-
# Create Tag with name and attributes
|
|
92
|
-
tag = Tag(element.tag, dict(element.attrib))
|
|
93
|
-
|
|
94
|
-
# Add text content
|
|
95
|
-
if element.text:
|
|
96
|
-
tag.contents.append(NavigableString(element.text))
|
|
97
|
-
|
|
98
|
-
# Recursively add child elements
|
|
99
|
-
for child in element:
|
|
100
|
-
child_tag = self._convert_element(child)
|
|
101
|
-
child_tag.parent = tag
|
|
102
|
-
tag.contents.append(child_tag)
|
|
103
|
-
|
|
104
|
-
# Add tail text
|
|
105
|
-
if child.tail:
|
|
106
|
-
tail_text = NavigableString(child.tail)
|
|
107
|
-
tail_text.parent = tag
|
|
108
|
-
tag.contents.append(tail_text)
|
|
109
|
-
|
|
110
|
-
return tag
|
|
111
|
-
|
|
112
|
-
def get_parsing_errors(self) -> List[str]:
|
|
113
|
-
"""
|
|
114
|
-
Retrieve parsing errors encountered during processing.
|
|
115
|
-
|
|
116
|
-
Returns:
|
|
117
|
-
List[str]: List of parsing error messages
|
|
118
|
-
"""
|
|
119
|
-
return self._parsing_errors
|
|
120
|
-
|
|
121
|
-
def find_all(self, markup: str, tag: Optional[Union[str, List[str]]] = None,
|
|
122
|
-
attrs: Optional[Dict[str, Any]] = None,
|
|
123
|
-
recursive: bool = True,
|
|
124
|
-
text: Optional[str] = None,
|
|
125
|
-
limit: Optional[int] = None) -> List[Tag]:
|
|
126
|
-
"""
|
|
127
|
-
Find all matching elements in the parsed document.
|
|
128
|
-
|
|
129
|
-
Args:
|
|
130
|
-
markup (str): HTML/XML content to parse
|
|
131
|
-
tag (str or List[str], optional): Tag name(s) to search for
|
|
132
|
-
attrs (dict, optional): Attribute filters
|
|
133
|
-
recursive (bool): Whether to search recursively
|
|
134
|
-
text (str, optional): Text content to search for
|
|
135
|
-
limit (int, optional): Maximum number of results
|
|
136
|
-
|
|
137
|
-
Returns:
|
|
138
|
-
List[Tag]: List of matching tags
|
|
139
|
-
"""
|
|
140
|
-
root = self.parse(markup)
|
|
141
|
-
|
|
142
|
-
def matches(element: Tag) -> bool:
|
|
143
|
-
"""Check if an element matches search criteria."""
|
|
144
|
-
# Tag filter
|
|
145
|
-
if tag and isinstance(tag, str) and element.name != tag:
|
|
146
|
-
return False
|
|
147
|
-
if tag and isinstance(tag, list) and element.name not in tag:
|
|
148
|
-
return False
|
|
149
|
-
|
|
150
|
-
# Attribute filter
|
|
151
|
-
if attrs:
|
|
152
|
-
for key, value in attrs.items():
|
|
153
|
-
if key not in element.attrs or element.attrs[key] != value:
|
|
154
|
-
return False
|
|
155
|
-
|
|
156
|
-
# Text filter
|
|
157
|
-
if text:
|
|
158
|
-
element_text = ' '.join([str(c) for c in element.contents if isinstance(c, NavigableString)])
|
|
159
|
-
if text not in element_text:
|
|
160
|
-
return False
|
|
161
|
-
|
|
162
|
-
return True
|
|
163
|
-
|
|
164
|
-
def collect_matches(element: Tag, results: List[Tag]):
|
|
165
|
-
"""Recursively collect matching elements."""
|
|
166
|
-
if matches(element):
|
|
167
|
-
results.append(element)
|
|
168
|
-
if limit and len(results) >= limit:
|
|
169
|
-
return
|
|
170
|
-
|
|
171
|
-
if recursive:
|
|
172
|
-
for child in element.contents:
|
|
173
|
-
if isinstance(child, Tag):
|
|
174
|
-
collect_matches(child, results)
|
|
175
|
-
|
|
176
|
-
results = []
|
|
177
|
-
collect_matches(root, results)
|
|
178
|
-
return results
|
|
1
|
+
"""
|
|
2
|
+
Scout LXML Parser - Advanced HTML/XML Parsing with lxml
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
from typing import List, Optional, Dict, Any, Union
|
|
7
|
+
|
|
8
|
+
from lxml import etree, html as lxml_html
|
|
9
|
+
from ..element import Tag, NavigableString
|
|
10
|
+
|
|
11
|
+
class LXMLParser:
|
|
12
|
+
"""
|
|
13
|
+
Advanced HTML/XML parser using lxml library.
|
|
14
|
+
Provides robust parsing with enhanced error handling and flexibility.
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
def __init__(self, parser_type: str = 'html'):
|
|
18
|
+
"""
|
|
19
|
+
Initialize the LXML parser with advanced parsing capabilities.
|
|
20
|
+
|
|
21
|
+
Args:
|
|
22
|
+
parser_type (str): Type of parser ('html' or 'xml')
|
|
23
|
+
"""
|
|
24
|
+
self._parser_type = parser_type
|
|
25
|
+
self._parsing_errors = []
|
|
26
|
+
|
|
27
|
+
# Select appropriate parser based on type
|
|
28
|
+
if parser_type == 'html':
|
|
29
|
+
self._parser = lxml_html.HTMLParser(recover=True, encoding='utf-8')
|
|
30
|
+
elif parser_type == 'xml':
|
|
31
|
+
self._parser = etree.XMLParser(recover=True, encoding='utf-8')
|
|
32
|
+
else:
|
|
33
|
+
raise ValueError(f"Unsupported parser type: {parser_type}")
|
|
34
|
+
|
|
35
|
+
def parse(self, markup: str) -> Tag:
|
|
36
|
+
"""
|
|
37
|
+
Parse HTML/XML markup and return the root tag.
|
|
38
|
+
|
|
39
|
+
Args:
|
|
40
|
+
markup (str): HTML/XML content to parse
|
|
41
|
+
|
|
42
|
+
Returns:
|
|
43
|
+
Tag: Parsed document root
|
|
44
|
+
"""
|
|
45
|
+
try:
|
|
46
|
+
# Preprocess markup to handle common issues
|
|
47
|
+
markup = self._preprocess_markup(markup)
|
|
48
|
+
|
|
49
|
+
# Parse the markup
|
|
50
|
+
if self._parser_type == 'html':
|
|
51
|
+
tree = lxml_html.fromstring(markup, parser=self._parser)
|
|
52
|
+
else:
|
|
53
|
+
tree = etree.fromstring(markup, parser=self._parser)
|
|
54
|
+
|
|
55
|
+
# Convert lxml element to Scout Tag
|
|
56
|
+
return self._convert_element(tree)
|
|
57
|
+
|
|
58
|
+
except Exception as e:
|
|
59
|
+
self._parsing_errors.append(str(e))
|
|
60
|
+
return Tag('root')
|
|
61
|
+
|
|
62
|
+
def _preprocess_markup(self, markup: str) -> str:
|
|
63
|
+
"""
|
|
64
|
+
Preprocess markup to handle common parsing issues.
|
|
65
|
+
|
|
66
|
+
Args:
|
|
67
|
+
markup (str): Raw markup
|
|
68
|
+
|
|
69
|
+
Returns:
|
|
70
|
+
str: Preprocessed markup
|
|
71
|
+
"""
|
|
72
|
+
# Remove XML/HTML comments
|
|
73
|
+
markup = re.sub(r'<!--.*?-->', '', markup, flags=re.DOTALL)
|
|
74
|
+
|
|
75
|
+
# Handle unclosed tags for HTML
|
|
76
|
+
if self._parser_type == 'html':
|
|
77
|
+
markup = re.sub(r'<(br|img|input|hr|meta)([^>]*?)(?<!/)>', r'<\1\2 />', markup, flags=re.IGNORECASE)
|
|
78
|
+
|
|
79
|
+
return markup
|
|
80
|
+
|
|
81
|
+
def _convert_element(self, element: Union[etree._Element, lxml_html.HtmlElement]) -> Tag:
|
|
82
|
+
"""
|
|
83
|
+
Convert lxml element to Scout Tag.
|
|
84
|
+
|
|
85
|
+
Args:
|
|
86
|
+
element (etree._Element or lxml_html.HtmlElement): lxml element
|
|
87
|
+
|
|
88
|
+
Returns:
|
|
89
|
+
Tag: Converted Scout Tag
|
|
90
|
+
"""
|
|
91
|
+
# Create Tag with name and attributes
|
|
92
|
+
tag = Tag(element.tag, dict(element.attrib))
|
|
93
|
+
|
|
94
|
+
# Add text content
|
|
95
|
+
if element.text:
|
|
96
|
+
tag.contents.append(NavigableString(element.text))
|
|
97
|
+
|
|
98
|
+
# Recursively add child elements
|
|
99
|
+
for child in element:
|
|
100
|
+
child_tag = self._convert_element(child)
|
|
101
|
+
child_tag.parent = tag
|
|
102
|
+
tag.contents.append(child_tag)
|
|
103
|
+
|
|
104
|
+
# Add tail text
|
|
105
|
+
if child.tail:
|
|
106
|
+
tail_text = NavigableString(child.tail)
|
|
107
|
+
tail_text.parent = tag
|
|
108
|
+
tag.contents.append(tail_text)
|
|
109
|
+
|
|
110
|
+
return tag
|
|
111
|
+
|
|
112
|
+
def get_parsing_errors(self) -> List[str]:
|
|
113
|
+
"""
|
|
114
|
+
Retrieve parsing errors encountered during processing.
|
|
115
|
+
|
|
116
|
+
Returns:
|
|
117
|
+
List[str]: List of parsing error messages
|
|
118
|
+
"""
|
|
119
|
+
return self._parsing_errors
|
|
120
|
+
|
|
121
|
+
def find_all(self, markup: str, tag: Optional[Union[str, List[str]]] = None,
|
|
122
|
+
attrs: Optional[Dict[str, Any]] = None,
|
|
123
|
+
recursive: bool = True,
|
|
124
|
+
text: Optional[str] = None,
|
|
125
|
+
limit: Optional[int] = None) -> List[Tag]:
|
|
126
|
+
"""
|
|
127
|
+
Find all matching elements in the parsed document.
|
|
128
|
+
|
|
129
|
+
Args:
|
|
130
|
+
markup (str): HTML/XML content to parse
|
|
131
|
+
tag (str or List[str], optional): Tag name(s) to search for
|
|
132
|
+
attrs (dict, optional): Attribute filters
|
|
133
|
+
recursive (bool): Whether to search recursively
|
|
134
|
+
text (str, optional): Text content to search for
|
|
135
|
+
limit (int, optional): Maximum number of results
|
|
136
|
+
|
|
137
|
+
Returns:
|
|
138
|
+
List[Tag]: List of matching tags
|
|
139
|
+
"""
|
|
140
|
+
root = self.parse(markup)
|
|
141
|
+
|
|
142
|
+
def matches(element: Tag) -> bool:
|
|
143
|
+
"""Check if an element matches search criteria."""
|
|
144
|
+
# Tag filter
|
|
145
|
+
if tag and isinstance(tag, str) and element.name != tag:
|
|
146
|
+
return False
|
|
147
|
+
if tag and isinstance(tag, list) and element.name not in tag:
|
|
148
|
+
return False
|
|
149
|
+
|
|
150
|
+
# Attribute filter
|
|
151
|
+
if attrs:
|
|
152
|
+
for key, value in attrs.items():
|
|
153
|
+
if key not in element.attrs or element.attrs[key] != value:
|
|
154
|
+
return False
|
|
155
|
+
|
|
156
|
+
# Text filter
|
|
157
|
+
if text:
|
|
158
|
+
element_text = ' '.join([str(c) for c in element.contents if isinstance(c, NavigableString)])
|
|
159
|
+
if text not in element_text:
|
|
160
|
+
return False
|
|
161
|
+
|
|
162
|
+
return True
|
|
163
|
+
|
|
164
|
+
def collect_matches(element: Tag, results: List[Tag]):
|
|
165
|
+
"""Recursively collect matching elements."""
|
|
166
|
+
if matches(element):
|
|
167
|
+
results.append(element)
|
|
168
|
+
if limit and len(results) >= limit:
|
|
169
|
+
return
|
|
170
|
+
|
|
171
|
+
if recursive:
|
|
172
|
+
for child in element.contents:
|
|
173
|
+
if isinstance(child, Tag):
|
|
174
|
+
collect_matches(child, results)
|
|
175
|
+
|
|
176
|
+
results = []
|
|
177
|
+
collect_matches(root, results)
|
|
178
|
+
return results
|
webscout/scout/utils.py
CHANGED
|
@@ -1,37 +1,37 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Utility functions - making life easier! 🛠️
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
from typing import Union, Optional
|
|
6
|
-
|
|
7
|
-
def decode_markup(markup: Union[str, bytes], encoding: Optional[str] = None) -> str:
|
|
8
|
-
"""
|
|
9
|
-
Decode that markup - no encoding drama! 🎯
|
|
10
|
-
|
|
11
|
-
Args:
|
|
12
|
-
markup: The raw HTML/XML content
|
|
13
|
-
encoding: The encoding to use (if known)
|
|
14
|
-
|
|
15
|
-
Returns:
|
|
16
|
-
Clean decoded string ready for parsing! ✨
|
|
17
|
-
"""
|
|
18
|
-
if isinstance(markup, str):
|
|
19
|
-
return markup
|
|
20
|
-
|
|
21
|
-
if encoding:
|
|
22
|
-
try:
|
|
23
|
-
return markup.decode(encoding)
|
|
24
|
-
except UnicodeDecodeError:
|
|
25
|
-
pass
|
|
26
|
-
|
|
27
|
-
# Try common encodings - we got options! 💪
|
|
28
|
-
encodings = ['utf-8', 'latin1', 'iso-8859-1', 'ascii']
|
|
29
|
-
|
|
30
|
-
for enc in encodings:
|
|
31
|
-
try:
|
|
32
|
-
return markup.decode(enc)
|
|
33
|
-
except UnicodeDecodeError:
|
|
34
|
-
continue
|
|
35
|
-
|
|
36
|
-
# Last resort - ignore errors and keep it moving! 🚀
|
|
37
|
-
return markup.decode('utf-8', errors='ignore')
|
|
1
|
+
"""
|
|
2
|
+
Utility functions - making life easier! 🛠️
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Union, Optional
|
|
6
|
+
|
|
7
|
+
def decode_markup(markup: Union[str, bytes], encoding: Optional[str] = None) -> str:
|
|
8
|
+
"""
|
|
9
|
+
Decode that markup - no encoding drama! 🎯
|
|
10
|
+
|
|
11
|
+
Args:
|
|
12
|
+
markup: The raw HTML/XML content
|
|
13
|
+
encoding: The encoding to use (if known)
|
|
14
|
+
|
|
15
|
+
Returns:
|
|
16
|
+
Clean decoded string ready for parsing! ✨
|
|
17
|
+
"""
|
|
18
|
+
if isinstance(markup, str):
|
|
19
|
+
return markup
|
|
20
|
+
|
|
21
|
+
if encoding:
|
|
22
|
+
try:
|
|
23
|
+
return markup.decode(encoding)
|
|
24
|
+
except UnicodeDecodeError:
|
|
25
|
+
pass
|
|
26
|
+
|
|
27
|
+
# Try common encodings - we got options! 💪
|
|
28
|
+
encodings = ['utf-8', 'latin1', 'iso-8859-1', 'ascii']
|
|
29
|
+
|
|
30
|
+
for enc in encodings:
|
|
31
|
+
try:
|
|
32
|
+
return markup.decode(enc)
|
|
33
|
+
except UnicodeDecodeError:
|
|
34
|
+
continue
|
|
35
|
+
|
|
36
|
+
# Last resort - ignore errors and keep it moving! 🚀
|
|
37
|
+
return markup.decode('utf-8', errors='ignore')
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""Webscout search module - unified search interfaces."""
|
|
2
|
+
|
|
3
|
+
from .base import BaseSearch, BaseSearchEngine
|
|
4
|
+
from .duckduckgo_main import DuckDuckGoSearch
|
|
5
|
+
from .yep_main import YepSearch
|
|
6
|
+
|
|
7
|
+
# Import new search engines
|
|
8
|
+
from .engines.bing import Bing
|
|
9
|
+
from .engines.brave import Brave
|
|
10
|
+
from .engines.mojeek import Mojeek
|
|
11
|
+
from .engines.yahoo import Yahoo
|
|
12
|
+
from .engines.yandex import Yandex
|
|
13
|
+
from .engines.wikipedia import Wikipedia
|
|
14
|
+
from .engines.bing_news import BingNews
|
|
15
|
+
from .engines.yahoo_news import YahooNews
|
|
16
|
+
|
|
17
|
+
# Import result models
|
|
18
|
+
from .results import (
|
|
19
|
+
TextResult,
|
|
20
|
+
ImagesResult,
|
|
21
|
+
VideosResult,
|
|
22
|
+
NewsResult,
|
|
23
|
+
BooksResult,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
__all__ = [
|
|
27
|
+
# Base classes
|
|
28
|
+
"BaseSearch",
|
|
29
|
+
"BaseSearchEngine",
|
|
30
|
+
|
|
31
|
+
# Main search interfaces
|
|
32
|
+
"DuckDuckGoSearch",
|
|
33
|
+
"YepSearch",
|
|
34
|
+
|
|
35
|
+
# Individual engines
|
|
36
|
+
"Bing",
|
|
37
|
+
"Brave",
|
|
38
|
+
"Mojeek",
|
|
39
|
+
"Yahoo",
|
|
40
|
+
"Yandex",
|
|
41
|
+
"Wikipedia",
|
|
42
|
+
"BingNews",
|
|
43
|
+
"YahooNews",
|
|
44
|
+
|
|
45
|
+
# Result models
|
|
46
|
+
"TextResult",
|
|
47
|
+
"ImagesResult",
|
|
48
|
+
"VideosResult",
|
|
49
|
+
"NewsResult",
|
|
50
|
+
"BooksResult",
|
|
51
|
+
]
|
webscout/search/base.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
"""Base class for search engines."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from abc import ABC, abstractmethod
|
|
7
|
+
from collections.abc import Mapping
|
|
8
|
+
from functools import cached_property
|
|
9
|
+
from typing import Any, Generic, Literal, TypeVar
|
|
10
|
+
|
|
11
|
+
try:
|
|
12
|
+
from lxml import html
|
|
13
|
+
from lxml.etree import HTMLParser as LHTMLParser
|
|
14
|
+
LXML_AVAILABLE = True
|
|
15
|
+
except ImportError:
|
|
16
|
+
LXML_AVAILABLE = False
|
|
17
|
+
html = None # type: ignore
|
|
18
|
+
LHTMLParser = None # type: ignore
|
|
19
|
+
|
|
20
|
+
from .http_client import HttpClient
|
|
21
|
+
from .results import BooksResult, ImagesResult, NewsResult, TextResult, VideosResult
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
T = TypeVar("T")
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class BaseSearchEngine(ABC, Generic[T]):
    """Abstract backend for a single scraping-based search engine.

    A concrete engine declares its endpoint, HTTP verb and xpath selectors
    as class attributes and supplies :meth:`build_payload`; the shared
    :meth:`search` pipeline then downloads the page and scrapes one
    ``result_type`` instance per matched item.
    """

    name: str  # unique key, e.g. "google"
    category: Literal["text", "images", "videos", "news", "books"]
    provider: str  # source of the search results (e.g. "google", "bing", etc.)
    disabled: bool = False  # if True, the engine is disabled
    priority: float = 1

    search_url: str
    search_method: str  # GET or POST
    search_headers: Mapping[str, str] = {}
    items_xpath: str = ""  # selects one node per result item
    elements_xpath: Mapping[str, str] = {}  # result field -> xpath relative to an item
    elements_replace: Mapping[str, str] = {}  # unused here; presumably consumed by subclasses — confirm

    def __init__(self, proxy: str | None = None, timeout: int | None = None, verify: bool = True):
        """Set up the HTTP transport for this engine.

        Args:
            proxy: Proxy URL (supports http/https/socks5).
            timeout: Request timeout in seconds.
            verify: Whether to verify SSL certificates.
        """
        self.results: list[T] = []
        self.http_client = HttpClient(proxy=proxy, timeout=timeout, verify=verify)

    @property
    def result_type(self) -> type[T]:
        """Result model class matching :attr:`category` (KeyError if unknown)."""
        by_category = {
            "books": BooksResult,
            "images": ImagesResult,
            "news": NewsResult,
            "text": TextResult,
            "videos": VideosResult,
        }
        return by_category[self.category]  # type: ignore

    @abstractmethod
    def build_payload(
        self, query: str, region: str, safesearch: str, timelimit: str | None, page: int, **kwargs: Any
    ) -> dict[str, Any]:
        """Return the request parameters / form data for one results page."""
        raise NotImplementedError

    def request(self, method: str, url: str, **kwargs: Any) -> str | None:
        """Fetch *url*; return the response body text, or None on any failure.

        Errors are logged, never propagated.
        """
        try:
            return self.http_client.request(method, url, **kwargs).text  # type: ignore
        except Exception as ex:
            logger.error("Error in %s request: %r", self.name, ex)
            return None

    @cached_property
    def parser(self) -> Any:
        """Shared lxml HTML parser instance, or None when lxml is missing."""
        if LXML_AVAILABLE:
            return LHTMLParser(remove_blank_text=True, remove_comments=True, remove_pis=True, collect_ids=False)
        logger.warning("lxml not available, HTML parsing disabled")
        return None

    def extract_tree(self, html_text: str) -> Any:
        """Parse *html_text* into an lxml element tree.

        Raises:
            ImportError: If lxml is not installed.
        """
        if LXML_AVAILABLE and self.parser:
            return html.fromstring(html_text, parser=self.parser)
        raise ImportError("lxml is required for HTML parsing")

    def pre_process_html(self, html_text: str) -> str:
        """Hook: transform raw HTML before parsing (identity by default)."""
        return html_text

    def extract_results(self, html_text: str) -> list[T]:
        """Scrape one result object per node matched by :attr:`items_xpath`."""
        if not LXML_AVAILABLE:
            raise ImportError("lxml is required for result extraction")

        tree = self.extract_tree(self.pre_process_html(html_text))
        # An empty items_xpath yields no items; subclasses may parse elsewhere.
        nodes = tree.xpath(self.items_xpath) if self.items_xpath else []

        extracted: list[T] = []
        for node in nodes:
            entry = self.result_type()
            for field, selector in self.elements_xpath.items():
                try:
                    raw = node.xpath(selector)
                    if raw:
                        # xpath may yield a list of text fragments or a scalar.
                        merged = "".join(raw) if isinstance(raw, list) else raw
                        setattr(entry, field, merged.strip() if isinstance(merged, str) else merged)
                except Exception as ex:
                    # Per-field failures are non-fatal; the field keeps its default.
                    logger.debug("Error extracting %s: %r", field, ex)
            extracted.append(entry)

        return extracted

    def post_extract_results(self, results: list[T]) -> list[T]:
        """Hook: adjust scraped results (identity by default)."""
        return results

    def search(
        self,
        query: str,
        region: str = "us-en",
        safesearch: str = "moderate",
        timelimit: str | None = None,
        page: int = 1,
        **kwargs: Any,
    ) -> list[T] | None:
        """Run one search request and return scraped results (None on HTTP failure)."""
        payload = self.build_payload(
            query=query, region=region, safesearch=safesearch, timelimit=timelimit, page=page, **kwargs
        )
        # GET sends the payload as query params, anything else as form data.
        payload_kw = "params" if self.search_method == "GET" else "data"
        html_text = self.request(
            self.search_method, self.search_url, headers=self.search_headers, **{payload_kw: payload}
        )
        if not html_text:
            return None
        return self.post_extract_results(self.extract_results(html_text))
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
# Legacy base class for backwards compatibility
class BaseSearch(ABC):
    """Legacy synchronous search interface.

    Kept so existing engines can continue to subclass it; new code should
    build on ``BaseSearchEngine`` instead. Every operation is abstract and
    returns a list of plain string dicts.
    """

    @abstractmethod
    def text(self, *args, **kwargs) -> list[dict[str, str]]:
        """Run a text (web) search."""
        raise NotImplementedError

    @abstractmethod
    def images(self, *args, **kwargs) -> list[dict[str, str]]:
        """Run an image search."""
        raise NotImplementedError

    @abstractmethod
    def videos(self, *args, **kwargs) -> list[dict[str, str]]:
        """Run a video search."""
        raise NotImplementedError

    @abstractmethod
    def news(self, *args, **kwargs) -> list[dict[str, str]]:
        """Run a news search."""
        raise NotImplementedError

    @abstractmethod
    def answers(self, *args, **kwargs) -> list[dict[str, str]]:
        """Fetch instant answers."""
        raise NotImplementedError

    @abstractmethod
    def suggestions(self, *args, **kwargs) -> list[dict[str, str]]:
        """Fetch query suggestions."""
        raise NotImplementedError

    @abstractmethod
    def maps(self, *args, **kwargs) -> list[dict[str, str]]:
        """Run a maps search."""
        raise NotImplementedError

    @abstractmethod
    def translate(self, *args, **kwargs) -> list[dict[str, str]]:
        """Translate text."""
        raise NotImplementedError
|