webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/scout/__init__.py
CHANGED

@@ -1,8 +1,8 @@

All 8 lines are removed and re-added with identical text, so the visible content is unchanged (most likely a whitespace or line-ending rewrite). The file content, shown once:

```python
"""
Scout: A powerful, zero-dependency web scraping library
"""

from .core import Scout, ScoutCrawler, ScoutTextAnalyzer, ScoutWebAnalyzer, ScoutSearchResult
from .element import Tag, NavigableString

__all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']
```
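For orientation, a minimal, hypothetical usage sketch of the exports above. It uses only the Scout calls that appear in the crawler.py diff further below (Scout(..., features="lxml"), find, find_all, get_text, get); passing an HTML string rather than response bytes, and having lxml installed, are assumptions, so the exact API may differ.

```python
# Minimal sketch, not from the package docs: exercises Scout using only calls
# visible in webscout/scout/core/crawler.py below. Passing a str (rather than
# raw response bytes) and relying on lxml being installed are assumptions.
from webscout.scout import Scout

html = "<html><head><title>Hello</title></head><body><a href='/docs'>Docs</a></body></html>"
scout = Scout(html, features="lxml")

title = scout.find("title")                  # find() appears to return a list of matches
print(title[0].get_text() if title else "")  # -> "Hello"

for link in scout.find_all("a", href=True):  # BeautifulSoup-style find_all()
    print(link.get("href"))                  # -> "/docs"
```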
webscout/scout/core/__init__.py
CHANGED

@@ -1,7 +1,7 @@

Lines 1-6 are removed and re-added with identical text (most likely the same whitespace or line-ending rewrite); line 7 is unchanged context. The file content, shown once:

```python
from .text_analyzer import ScoutTextAnalyzer
from .web_analyzer import ScoutWebAnalyzer
from .search_result import ScoutSearchResult
from .crawler import ScoutCrawler
from .scout import Scout

__all__ = ['ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult', 'ScoutCrawler', 'Scout']
```
webscout/scout/core/crawler.py
CHANGED

@@ -1,297 +1,297 @@

All 297 lines are removed and re-added with identical text, so the visible content is unchanged (most likely a whitespace or line-ending rewrite). The file content, shown once with its original indentation reconstructed:

```python
"""
Scout Crawler Module - Ultra Advanced Web Crawling System
"""

import concurrent.futures
import urllib.parse
import time
import hashlib
import re
import json
import sqlite3
import threading
import queue
import logging
import mimetypes
import pickle
import asyncio
import aiohttp
import random
from urllib import robotparser
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Union, Set, Tuple, Callable, Any
from collections import defaultdict, deque
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path

try:
    from webscout.litagent import LitAgent
except ImportError:
    LitAgent = None

try:
    from curl_cffi.requests import Session
except ImportError:
    import requests
    Session = requests.Session

from .scout import Scout
from .text_analyzer import ScoutTextAnalyzer


@dataclass
class CrawlConfig:
    """Configuration for the crawler."""
    max_pages: int = 1000
    max_depth: int = 10
    delay: float = 0.5
    obey_robots: bool = True
    crawl_subdomains: bool = True
    max_workers: int = 10
    timeout: int = 30
    retry_attempts: int = 3
    include_external_links: bool = False
    extract_metadata: bool = True
    extract_structured_data: bool = True
    extract_semantic_content: bool = True


@dataclass
class PageData:
    """Comprehensive page data for LLM training."""
    url: str
    title: str
    text: str
    clean_text: str
    markdown_text: str
    links: List[str]
    internal_links: List[str]
    external_links: List[str]
    metadata: Dict[str, Any]
    structured_data: Dict[str, Any]
    semantic_content: Dict[str, Any]
    headers: Dict[str, str]
    status_code: int
    content_type: str
    language: str
    timestamp: str
    depth: int
    word_count: int


class ScoutCrawler:
    """
    Ultra-advanced web crawling utility optimized for LLM data collection.
    """
    def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: List[str] = None, session: Optional[Session] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
        """
        Initialize the web crawler.

        Args:
            base_url (str): Starting URL to crawl
            max_pages (int, optional): Maximum number of pages to crawl
            tags_to_remove (List[str], optional): List of tags to remove
        """
        self.base_url = base_url
        self.max_pages = max_pages
        self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
            "script",
            "style"
        ]
        self.visited_urls = set()
        self.crawled_pages = []
        self.session = session or Session()
        self.agent = LitAgent()
        # Use all headers and generate fingerprint
        self.session.headers = self.agent.generate_fingerprint()
        self.session.headers.setdefault("User-Agent", self.agent.chrome())
        self.delay = delay
        self.obey_robots = obey_robots
        # Allow crawling of subdomains by default
        base_domain = urllib.parse.urlparse(base_url).netloc.split('.')
        self.base_domain = '.'.join(base_domain[-2:]) if len(base_domain) > 1 else base_domain[0]
        self.allowed_domains = allowed_domains or [self.base_domain]
        self.last_request_time = 0
        self.url_hashes = set()
        if obey_robots:
            self.robots = robotparser.RobotFileParser()
            robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
            try:
                self.robots.set_url(robots_url)
                self.robots.read()
            except Exception:
                self.robots = None
        else:
            self.robots = None

    def _normalize_url(self, url: str) -> str:
        url = url.split('#')[0]
        url = re.sub(r'\?.*$', '', url)  # Remove query params
        return url.rstrip('/')

    def _is_valid_url(self, url: str) -> bool:
        """
        Check if a URL is valid and within the same domain.

        Args:
            url (str): URL to validate

        Returns:
            bool: Whether the URL is valid
        """
        try:
            parsed_base = urllib.parse.urlparse(self.base_url)
            parsed_url = urllib.parse.urlparse(url)
            if parsed_url.scheme not in ["http", "https"]:
                return False
            # Allow crawling subdomains
            if not parsed_url.netloc.endswith(self.base_domain):
                return False
            if self.obey_robots and self.robots:
                return self.robots.can_fetch("*", url)
            return True
        except Exception:
            return False

    def _is_duplicate(self, url: str) -> bool:
        norm = self._normalize_url(url)
        url_hash = hashlib.md5(norm.encode()).hexdigest()
        if url_hash in self.url_hashes:
            return True
        self.url_hashes.add(url_hash)
        return False

    def _extract_main_text(self, soup):
        # Try to extract main content (simple heuristic)
        main = soup.find('main')
        if main:
            return main.get_text(separator=" ", strip=True)
        article = soup.find('article')
        if article:
            return article.get_text(separator=" ", strip=True)
        # fallback to body
        body = soup.find('body')
        if body:
            return body.get_text(separator=" ", strip=True)
        return soup.get_text(separator=" ", strip=True)

    def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Union[str, List[str]]]:
        """
        Crawl a single page and extract information.

        Args:
            url (str): URL to crawl
            depth (int, optional): Current crawl depth

        Returns:
            Dict[str, Union[str, List[str]]]: Crawled page information
        """
        if url in self.visited_urls or self._is_duplicate(url):
            return {}
        # Log URL to crawl
        print(f"Attempting to crawl URL: {url} (depth: {depth})")

        # Throttle requests
        now = time.time()
        if self.last_request_time:
            elapsed = now - self.last_request_time
            if elapsed < self.delay:
                time.sleep(self.delay - elapsed)
        self.last_request_time = time.time()
        try:
            response = self.session.get(url, timeout=10)
            response.raise_for_status()
            if not response.headers.get('Content-Type', '').startswith('text/html'):
                return {}
            scout = Scout(response.content, features="lxml")
            title_result = scout.find("title")
            title = title_result[0].get_text() if title_result else ""

            # Remove only script and style tags before extracting text
            for tag_name in self.tags_to_remove:
                for tag in scout._soup.find_all(tag_name):
                    tag.decompose()

            visible_text = self._extract_main_text(scout._soup)

            # Extract links from header, footer, nav, etc.
            essential_links = []
            for essential_tag in ['header', 'nav', 'footer']:
                elements = scout.find_all(essential_tag)
                for element in elements:
                    links = element.find_all('a', href=True)
                    essential_links.extend(
                        urllib.parse.urljoin(url, link.get('href'))
                        for link in links
                        if link.get('href') and self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
                    )

            all_links = [
                urllib.parse.urljoin(url, link.get('href'))
                for link in scout.find_all('a', href=True)
                if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
            ]

            combined_links = list(set(all_links + essential_links))

            page_info = {
                'url': url,
                'title': title,
                'links': combined_links,
                'text': visible_text,
                'depth': depth,
                'timestamp': datetime.utcnow().isoformat(),
                'headers': dict(response.headers),
            }
            self.visited_urls.add(url)
            self.crawled_pages.append(page_info)
            return page_info
        except Exception as e:
            print(f"Error crawling {url}: {e}")
            return {}

    def crawl(self):
        """
        Start web crawling from base URL and yield each crawled page in real time.

        Yields:
            Dict[str, Union[str, List[str]]]: Crawled page information
        """
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            futures = {executor.submit(self._crawl_page, self.base_url, 0)}
            submitted_links: set[str] = set()

            while futures:
                if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
                    break
                done, not_done = concurrent.futures.wait(
                    futures, return_when=concurrent.futures.FIRST_COMPLETED
                )
                futures = not_done

                for future in done:
                    page_info = future.result()

                    if page_info:
                        yield page_info

                        if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
                            return

                        for link in page_info.get("links", []):
                            if (
                                (self.max_pages is None or len(self.visited_urls) < self.max_pages)
                                and link not in self.visited_urls
                                and link not in submitted_links
                            ):
                                submitted_links.add(link)
                                futures.add(
                                    executor.submit(
                                        self._crawl_page,
                                        link,
                                        page_info.get("depth", 0) + 1,
                                    )
                                )
                    else:
                        print(f"No page info retrieved from crawling")
```