webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,568 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Scout Main Module - HTML Parsing and Traversal
|
|
3
|
+
"""
|
|
4
|
+
import re
|
|
5
|
+
import json
|
|
6
|
+
import hashlib
|
|
7
|
+
import unicodedata
|
|
8
|
+
import urllib.parse
|
|
9
|
+
from typing import List, Dict, Optional, Any
|
|
10
|
+
|
|
11
|
+
from ..parsers import ParserRegistry
|
|
12
|
+
from ..element import Tag, NavigableString
|
|
13
|
+
from ..utils import decode_markup
|
|
14
|
+
from .text_analyzer import ScoutTextAnalyzer
|
|
15
|
+
from .web_analyzer import ScoutWebAnalyzer
|
|
16
|
+
from .search_result import ScoutSearchResult
|
|
17
|
+
from .text_utils import SentenceTokenizer
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class Scout:
|
|
21
|
+
"""
|
|
22
|
+
Scout - Making web scraping a breeze! 🌊
|
|
23
|
+
A comprehensive HTML parsing and traversal library.
|
|
24
|
+
Enhanced with advanced features and intelligent parsing.
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
def __init__(self, markup="", features='html.parser', from_encoding=None, **kwargs):
    """
    Build a Scout document from raw markup.

    Args:
        markup (str): HTML content to parse.
        features (str): Parser backend ('html.parser', 'lxml', 'html5lib', 'lxml-xml').
        from_encoding (str): Source encoding, if already known.
        **kwargs: Additional parsing options.

    Raises:
        ValueError: If *features* does not name a registered parser.
    """
    # Normalise the raw input before handing it to a parser backend.
    self.markup = self._preprocess_markup(markup, from_encoding)
    self.features = features
    self.from_encoding = from_encoding

    # Validate the requested parser up front so the caller gets a clear
    # error instead of a failure deeper in the parse.
    available = ParserRegistry.list_parsers()
    if features not in available:
        raise ValueError(
            f"Invalid parser '{features}'! Choose from: {', '.join(available.keys())}"
        )

    self.parser = ParserRegistry.get_parser(features)

    # Parse immediately; every other method operates on this tree.
    self._soup = self.parser.parse(self.markup)

    # Mirror BeautifulSoup's top-level attributes where the tree has them.
    self.name = getattr(self._soup, 'name', None)
    self.attrs = getattr(self._soup, 'attrs', {})

    # Per-instance store backing cache().
    self._cache = {}

    # Helper analyzers for text and page-structure introspection.
    self.text_analyzer = ScoutTextAnalyzer()
    self.web_analyzer = ScoutWebAnalyzer()
|
|
64
|
+
|
|
65
|
+
def normalize_text(self, text: str, form='NFKD') -> str:
    """
    Apply Unicode normalization to *text*.

    Args:
        text (str): Input text.
        form (str, optional): Unicode normal form ('NFC', 'NFD', 'NFKC', 'NFKD').

    Returns:
        str: The normalized text.
    """
    normalized = unicodedata.normalize(form, text)
    return normalized
|
|
77
|
+
|
|
78
|
+
def url_parse(self, url: str) -> Dict[str, str]:
    """
    Split a URL into its six standard components.

    Args:
        url (str): URL to parse.

    Returns:
        Dict[str, str]: Mapping with 'scheme', 'netloc', 'path',
            'params', 'query' and 'fragment' keys.
    """
    pieces = urllib.parse.urlparse(url)
    # Build the mapping from the named fields of the ParseResult.
    return {
        field: getattr(pieces, field)
        for field in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
    }
|
|
97
|
+
|
|
98
|
+
def analyze_page_structure(self) -> Dict[str, Any]:
    """
    Analyze the structure of the parsed page.

    Delegates to the attached ScoutWebAnalyzer, passing this document.

    Returns:
        Dict[str, Any]: Page structure analysis produced by the analyzer.
    """
    analyzer = self.web_analyzer
    return analyzer.analyze_page_structure(self)
|
|
106
|
+
|
|
107
|
+
def analyze_text(self, text: Optional[str] = None) -> Dict[str, Any]:
    """
    Perform advanced text analysis via the attached ScoutTextAnalyzer.

    Args:
        text (str, optional): Text to analyze; defaults to the full page text.

    Returns:
        Dict[str, Any]: 'word_count', 'entities' and 'tokens' results.
    """
    sample = self.get_text() if text is None else text
    analyzer = self.text_analyzer
    return {
        'word_count': analyzer.count_words(sample),
        'entities': analyzer.extract_entities(sample),
        'tokens': analyzer.tokenize(sample),
    }
|
|
125
|
+
|
|
126
|
+
def extract_semantic_info(self) -> Dict[str, Any]:
    """
    Extract semantic information from the document.

    Returns:
        Dict[str, Any]: Headings (stripped h1-h3 text), list items per
            ul/ol, and a table summary (count plus th cells per table).
    """
    headings = {
        level: [h.get_text(strip=True) for h in self.find_all(level)]
        for level in ('h1', 'h2', 'h3')
    }
    tables = self.find_all('table')
    return {
        'headings': headings,
        'lists': {
            kind: [lst.find_all('li') for lst in self.find_all(kind)]
            for kind in ('ul', 'ol')
        },
        'tables': {
            'count': len(tables),
            'headers': [table.find_all('th') for table in tables],
        },
    }
|
|
149
|
+
|
|
150
|
+
def cache(self, key: str, value: Any = None) -> Any:
    """
    Get or set an entry in the per-instance cache.

    Args:
        key (str): Cache key.
        value (Any, optional): When not None, stored under *key* first.
            NOTE(review): a literal None value can never be cached with
            this API — confirm whether that is intended.

    Returns:
        Any: The value currently stored for *key*, or None when absent.
    """
    store = self._cache
    if value is not None:
        store[key] = value
    return store.get(key)
|
|
164
|
+
|
|
165
|
+
def hash_content(self, method='md5') -> str:
    """
    Hash the serialized document.

    Args:
        method (str, optional): One of 'md5', 'sha1', 'sha256'.

    Returns:
        str: Hex digest of str(self._soup) encoded as UTF-8.

    Raises:
        ValueError: If *method* is not supported.
    """
    supported = {'md5': hashlib.md5, 'sha1': hashlib.sha1, 'sha256': hashlib.sha256}
    try:
        hasher = supported[method]()
    except KeyError:
        raise ValueError(f"Unsupported hash method: {method}") from None
    hasher.update(str(self._soup).encode('utf-8'))
    return hasher.hexdigest()
|
|
187
|
+
|
|
188
|
+
def extract_links(self, base_url: Optional[str] = None) -> List[Dict[str, str]]:
    """
    Collect every <a> and <link> element that carries an href.

    Args:
        base_url (str, optional): When given, relative hrefs are joined
            onto this base (simple string join, not RFC 3986 resolution).

    Returns:
        List[Dict[str, str]]: One dict per link with 'href', 'text',
            'rel' (first entry of the rel value) and 'type'.
    """
    collected = []
    for element in self.find_all(['a', 'link']):
        target = element.get('href')
        if not target:
            continue
        # Join relative targets onto the base; absolute and
        # protocol-relative ('//') targets pass through untouched.
        if base_url and not target.startswith(('http://', 'https://', '//')):
            target = f"{base_url.rstrip('/')}/{target.lstrip('/')}"
        collected.append({
            'href': target,
            'text': element.get_text(strip=True),
            # assumes 'rel' is a list-valued attribute — TODO confirm Tag.get semantics
            'rel': element.get('rel', [None])[0],
            'type': element.get('type'),
        })
    return collected
|
|
213
|
+
|
|
214
|
+
def extract_metadata(self) -> Dict[str, Any]:
    """
    Extract document metadata: title, description, keywords, plus
    Open Graph ('og:*') and Twitter Card ('twitter:*') meta tags.

    Returns:
        Dict[str, Any]: Keys 'title', 'description', 'keywords',
            'og_metadata', 'twitter_metadata'.
    """
    # Run each query once and reuse the result. The original called
    # find() twice per field — once to test, once to read — doubling
    # every tree search for no benefit.
    titles = self.find('title').texts()
    descriptions = self.find('meta', attrs={'name': 'description'}).attrs('content')
    keywords = self.find('meta', attrs={'name': 'keywords'}).attrs('content')

    metadata = {
        'title': titles[0] if titles else None,
        'description': descriptions[0] if descriptions else None,
        'keywords': keywords[0].split(',') if keywords else [],
        'og_metadata': {},
        'twitter_metadata': {},
    }

    # Open Graph: <meta property="og:...">; strip the 'og:' prefix.
    for meta in self.find_all('meta', attrs={'property': re.compile(r'^og:')}):
        key = meta.attrs('property')[0][3:]
        metadata['og_metadata'][key] = meta.attrs('content')[0]

    # Twitter Cards: <meta name="twitter:...">; strip the 'twitter:' prefix.
    for meta in self.find_all('meta', attrs={'name': re.compile(r'^twitter:')}):
        key = meta.attrs('name')[0][8:]
        metadata['twitter_metadata'][key] = meta.attrs('content')[0]

    return metadata
|
|
240
|
+
|
|
241
|
+
def to_json(self, indent=2) -> str:
    """
    Serialize the document tree to JSON.

    Each tag becomes {'name', 'attrs', 'text'} plus a recursive
    'children' list when it has contents; bare text nodes are emitted
    as plain strings.

    Args:
        indent (int, optional): JSON indentation width.

    Returns:
        str: JSON representation of the document.
    """
    def serialize(node):
        # Text nodes serialize to their plain string value.
        if isinstance(node, NavigableString):
            return str(node)
        entry = {
            'name': node.name,
            'attrs': node.attrs,
            'text': node.get_text(strip=True),
        }
        if node.contents:
            entry['children'] = [serialize(child) for child in node.contents]
        return entry

    return json.dumps(serialize(self._soup), indent=indent)
|
|
267
|
+
|
|
268
|
+
def find(self, name=None, attrs=None, recursive=True, text=None, **kwargs) -> "ScoutSearchResult":
    """
    Find the first matching element.

    Args:
        name (str, optional): Tag name to search for.
        attrs (dict, optional): Attributes to match.
        recursive (bool, optional): Search recursively.
        text (str, optional): Text content to match.
        **kwargs: Extra criteria forwarded to the underlying tree's find().

    Returns:
        ScoutSearchResult: Result wrapping the first match, or empty.
    """
    # Fix: the original used a shared mutable default (attrs={});
    # normalize None to a fresh dict per call instead.
    if attrs is None:
        attrs = {}
    result = self._soup.find(name, attrs, recursive, text, **kwargs)
    return ScoutSearchResult([result]) if result else ScoutSearchResult([])
|
|
283
|
+
|
|
284
|
+
def find_all(self, name=None, attrs=None, recursive=True, text=None, limit=None, **kwargs) -> "ScoutSearchResult":
    """
    Find all matching elements.

    Args:
        name (str, optional): Tag name to search for.
        attrs (dict, optional): Attributes to match.
        recursive (bool, optional): Search recursively.
        text (str, optional): Text content to match.
        limit (int, optional): Maximum number of results.
        **kwargs: Extra criteria forwarded to the underlying tree's find_all().

    Returns:
        ScoutSearchResult: All matching elements.
    """
    # Fix: the original used a shared mutable default (attrs={});
    # normalize None to a fresh dict per call instead.
    if attrs is None:
        attrs = {}
    results = self._soup.find_all(name, attrs, recursive, text, limit, **kwargs)
    return ScoutSearchResult(results)
|
|
300
|
+
|
|
301
|
+
def find_parent(self, name=None, attrs=None, **kwargs) -> Optional["Tag"]:
    """
    Find the closest ancestor matching the given criteria.

    Args:
        name (str, optional): Tag name to match.
        attrs (dict, optional): Attribute values the ancestor must have.
        **kwargs: Additional attribute criteria. Bug fix: these were
            previously accepted but silently ignored.

    Returns:
        Tag or None: Nearest matching ancestor, or None.
    """
    # Merge dict criteria with keyword criteria; building a fresh dict
    # also avoids the mutable-default-argument pitfall of attrs={}.
    criteria = {**(attrs or {}), **kwargs}
    current = self._soup.parent
    while current:
        if (name is None or current.name == name) and \
                all(current.get(k) == v for k, v in criteria.items()):
            return current
        current = current.parent
    return None
|
|
319
|
+
|
|
320
|
+
def find_parents(self, name=None, attrs=None, limit=None, **kwargs) -> List["Tag"]:
    """
    Find all ancestors matching the given criteria, nearest first.

    Args:
        name (str, optional): Tag name to match.
        attrs (dict, optional): Attribute values ancestors must have.
        limit (int, optional): Maximum number of results.
        **kwargs: Additional attribute criteria. Bug fix: these were
            previously accepted but silently ignored.

    Returns:
        List[Tag]: Matching ancestors, closest first.
    """
    # Fresh criteria dict per call: merges kwargs and avoids the
    # mutable-default-argument pitfall of attrs={}.
    criteria = {**(attrs or {}), **kwargs}
    matches = []
    current = self._soup.parent
    while current and (limit is None or len(matches) < limit):
        if (name is None or current.name == name) and \
                all(current.get(k) == v for k, v in criteria.items()):
            matches.append(current)
        current = current.parent
    return matches
|
|
340
|
+
|
|
341
|
+
def find_next_sibling(self, name=None, attrs=None, **kwargs) -> Optional["Tag"]:
    """
    Find the first following sibling matching the given criteria.

    Args:
        name (str, optional): Tag name to match.
        attrs (dict, optional): Attribute values the sibling must have.
        **kwargs: Additional attribute criteria. Bug fix: these were
            previously accepted but silently ignored.

    Returns:
        Tag or None: First matching later sibling, or None.
    """
    parent = self._soup.parent
    if not parent:
        return None

    # Merge criteria; fresh dict avoids the mutable-default pitfall.
    criteria = {**(attrs or {}), **kwargs}
    siblings = parent.contents
    try:
        start = siblings.index(self._soup)
    except ValueError:
        # Node is not among its parent's contents; nothing to scan.
        # (try kept minimal: only index() can raise here.)
        return None
    for sibling in siblings[start + 1:]:
        if isinstance(sibling, Tag):
            if (name is None or sibling.name == name) and \
                    all(sibling.get(k) == v for k, v in criteria.items()):
                return sibling
    return None
|
|
366
|
+
|
|
367
|
+
def find_next_siblings(self, name=None, attrs=None, limit=None, **kwargs) -> List["Tag"]:
    """
    Find all following siblings matching the given criteria.

    Args:
        name (str, optional): Tag name to match.
        attrs (dict, optional): Attribute values siblings must have.
        limit (int, optional): Maximum number of results.
        **kwargs: Additional attribute criteria. Bug fix: these were
            previously accepted but silently ignored.

    Returns:
        List[Tag]: Matching later siblings, in document order.
    """
    parent = self._soup.parent
    if not parent:
        return []

    # Merge criteria; fresh dict avoids the mutable-default pitfall.
    criteria = {**(attrs or {}), **kwargs}
    contents = parent.contents
    try:
        start = contents.index(self._soup)
    except ValueError:
        # Node is not among its parent's contents; nothing to scan.
        # (try kept minimal: only index() can raise here.)
        return []
    matches = []
    for sibling in contents[start + 1:]:
        if isinstance(sibling, Tag):
            if (name is None or sibling.name == name) and \
                    all(sibling.get(k) == v for k, v in criteria.items()):
                matches.append(sibling)
                if limit and len(matches) == limit:
                    break
    return matches
|
|
396
|
+
|
|
397
|
+
def select(self, selector: str) -> List["Tag"]:
    """
    Run a CSS selector query against the document.

    Args:
        selector (str): CSS selector string.

    Returns:
        List[Tag]: All matching elements, as returned by the
            underlying parse tree's select().
    """
    tree = self._soup
    return tree.select(selector)
|
|
408
|
+
|
|
409
|
+
def select_one(self, selector: str) -> Optional["Tag"]:
    """
    Return the first element matching a CSS selector, or None.

    Args:
        selector (str): CSS selector string.

    Returns:
        Tag or None: First match from the underlying tree's select_one().
    """
    tree = self._soup
    return tree.select_one(selector)
|
|
420
|
+
|
|
421
|
+
def get_text(self, separator=' ', strip=False, types=None) -> str:
    """
    Extract the document's text, re-flowed one sentence group per block.

    Args:
        separator (str, optional): Separator passed to the tree's get_text().
        strip (bool, optional): Whether to strip whitespace.
        types (list, optional): Node types to include.

    Returns:
        str: Sentences joined by blank lines.
    """
    raw = self._soup.get_text(separator, strip, types)
    # Re-split into sentences so callers get paragraph-like chunks.
    return "\n\n".join(SentenceTokenizer().tokenize(raw))
|
|
437
|
+
|
|
438
|
+
def remove_tags(self, tags: List[str]) -> None:
    """
    Remove every occurrence of the named tags (and their contents).

    Args:
        tags (List[str]): Tag names to strip from the document.
    """
    for tag_name in tags:
        # decompose() detaches the element and destroys its subtree.
        for match in self._soup.find_all(tag_name):
            match.decompose()
|
|
448
|
+
|
|
449
|
+
def prettify(self, formatter='minimal') -> str:
    """
    Return a pretty-printed rendering of the document.

    Args:
        formatter (str, optional): Formatter name passed through to the tree.

    Returns:
        str: Formatted HTML.
    """
    tree = self._soup
    return tree.prettify(formatter)
|
|
460
|
+
|
|
461
|
+
def decompose(self, tag: Tag = None) -> None:
    """Destroy a tag and everything inside it.

    Args:
        tag (Tag, optional): Tag to destroy; defaults to the document root.
    """
    target = self._soup if tag is None else tag
    target.decompose()
|
|
471
|
+
|
|
472
|
+
def extract(self, tag: Tag = None) -> Tag:
    """Detach a tag from the document and return it.

    Args:
        tag (Tag, optional): Tag to detach; defaults to the document root.

    Returns:
        Tag: The detached tag.
    """
    target = self._soup if tag is None else tag
    return target.extract()
|
|
485
|
+
|
|
486
|
+
def clear(self, tag: Tag = None) -> None:
    """Empty a tag's contents while keeping the tag itself in place.

    Args:
        tag (Tag, optional): Tag to empty; defaults to the document root.
    """
    target = self._soup if tag is None else tag
    target.clear()
|
|
496
|
+
|
|
497
|
+
def replace_with(self, old_tag: Tag, new_tag: Tag) -> None:
    """Swap one tag for another in the document tree.

    Args:
        old_tag (Tag): Tag currently in the tree, to be replaced.
        new_tag (Tag): Tag to put in its place.
    """
    # The replacement is performed by the tag itself; no soup state changes here.
    old_tag.replace_with(new_tag)
|
|
506
|
+
|
|
507
|
+
def encode(self, encoding='utf-8') -> bytes:
    """Serialize the document to bytes.

    Args:
        encoding (str, optional): Target character encoding (default 'utf-8').

    Returns:
        bytes: The document rendered via str() and encoded.
    """
    rendered = str(self._soup)
    return rendered.encode(encoding)
|
|
518
|
+
|
|
519
|
+
def decode(self, encoding='utf-8') -> str:
    """Return the document as a string.

    Args:
        encoding (str, optional): Accepted for API symmetry with ``encode``
            but currently unused — the document is already text.

    Returns:
        str: The rendered document.
    """
    # NOTE(review): `encoding` is ignored; the soup is simply rendered via str().
    return str(self._soup)
|
|
530
|
+
|
|
531
|
+
def __str__(self) -> str:
    """
    Render the parsed document as HTML.

    Returns:
        str: Full HTML content of the underlying soup.
    """
    return str(self._soup)
|
|
539
|
+
|
|
540
|
+
def __repr__(self) -> str:
    """
    Concise developer representation of the Scout object.

    Returns:
        str: Summary with the parser features and input markup length.
    """
    # self.features / self.markup are set by the constructor (not visible here).
    return f"Scout(features='{self.features}', content_length={len(self.markup)})"
|
|
548
|
+
|
|
549
|
+
def _preprocess_markup(self, markup: str, encoding: Optional[str] = None) -> str:
    """
    Preprocess markup before parsing.

    Args:
        markup (str): Input markup.
        encoding (str, optional): Encoding passed through to the decoder.

    Returns:
        str: Decoded markup with comments stripped and whitespace collapsed.
    """
    # Decode the input into a normalized str (helper defined elsewhere in the package).
    decoded_markup = decode_markup(markup, encoding)

    # Strip HTML comments, including multi-line ones (non-greedy match + DOTALL).
    decoded_markup = re.sub(r'<!--.*?-->', '', decoded_markup, flags=re.DOTALL)
    # Collapse every run of whitespace to a single space.
    # NOTE(review): this also collapses whitespace inside <pre>/<textarea>
    # and <script> bodies — confirm that is acceptable before parsing.
    decoded_markup = re.sub(r'\s+', ' ', decoded_markup)

    return decoded_markup
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Scout Search Result Module
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Union, Callable, Any, Dict, Iterator
|
|
6
|
+
from ..element import Tag
|
|
7
|
+
from .text_analyzer import ScoutTextAnalyzer
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ScoutSearchResult:
    """
    Sequence-like wrapper around a list of matched tags.

    Supports len()/iteration/indexing over the underlying Tag list and adds
    helpers for bulk text and attribute extraction, filtering, mapping, and
    simple text analysis.
    """

    def __init__(self, results: List[Tag]):
        """
        Wrap a list of matched tags.

        Args:
            results (List[Tag]): Tags produced by a search.
        """
        self._results = results

    def __len__(self) -> int:
        return len(self._results)

    def __iter__(self) -> Iterator[Tag]:
        return iter(self._results)

    def __getitem__(self, index: Union[int, slice]) -> Union[Tag, List[Tag]]:
        return self._results[index]

    def texts(self, separator=' ', strip=True) -> List[str]:
        """
        Collect the text of every result.

        Args:
            separator (str, optional): Separator passed to each tag's get_text.
            strip (bool, optional): Whether each text is stripped.

        Returns:
            List[str]: One text string per matched tag, in order.
        """
        return [node.get_text(separator, strip) for node in self._results]

    def attrs(self, attr_name: str) -> List[Any]:
        """
        Collect one attribute's value from every result.

        Args:
            attr_name (str): Name of the attribute to read.

        Returns:
            List[Any]: Values as returned by each tag's ``get``.
        """
        return [node.get(attr_name) for node in self._results]

    def filter(self, predicate: Callable[[Tag], bool]) -> 'ScoutSearchResult':
        """
        Keep only the results for which *predicate* is true.

        Args:
            predicate (Callable[[Tag], bool]): Test applied to each tag.

        Returns:
            ScoutSearchResult: A new result set holding the passing tags.
        """
        kept = [node for node in self._results if predicate(node)]
        return ScoutSearchResult(kept)

    def map(self, transform: Callable[[Tag], Any]) -> List[Any]:
        """
        Apply *transform* to every result.

        Args:
            transform (Callable[[Tag], Any]): Function applied to each tag.

        Returns:
            List[Any]: Transformed values, in result order.
        """
        return [transform(node) for node in self._results]

    def analyze_text(self) -> Dict[str, Any]:
        """
        Run a simple text analysis over all results.

        Returns:
            Dict[str, Any]: Result count, word counts, and extracted entities
            computed by ScoutTextAnalyzer over the concatenated texts.
        """
        combined = ' '.join(self.texts(strip=True))
        return {
            'total_results': len(self._results),
            'word_count': ScoutTextAnalyzer.count_words(combined),
            'entities': ScoutTextAnalyzer.extract_entities(combined),
        }
|