webscout 8.3.7__py3-none-any.whl → 2025.10.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (273)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Apriel.py +306 -0
  65. webscout/Provider/ChatGPTClone.py +236 -236
  66. webscout/Provider/ChatSandbox.py +343 -343
  67. webscout/Provider/Cloudflare.py +324 -324
  68. webscout/Provider/Cohere.py +208 -208
  69. webscout/Provider/Deepinfra.py +370 -366
  70. webscout/Provider/ExaAI.py +260 -260
  71. webscout/Provider/ExaChat.py +308 -308
  72. webscout/Provider/Flowith.py +221 -221
  73. webscout/Provider/GMI.py +293 -0
  74. webscout/Provider/Gemini.py +164 -164
  75. webscout/Provider/GeminiProxy.py +167 -167
  76. webscout/Provider/GithubChat.py +371 -372
  77. webscout/Provider/Groq.py +800 -800
  78. webscout/Provider/HeckAI.py +383 -383
  79. webscout/Provider/Jadve.py +282 -282
  80. webscout/Provider/K2Think.py +307 -307
  81. webscout/Provider/Koboldai.py +205 -205
  82. webscout/Provider/LambdaChat.py +423 -423
  83. webscout/Provider/Nemotron.py +244 -244
  84. webscout/Provider/Netwrck.py +248 -248
  85. webscout/Provider/OLLAMA.py +395 -395
  86. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  87. webscout/Provider/OPENAI/FalconH1.py +451 -451
  88. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  89. webscout/Provider/OPENAI/K2Think.py +431 -431
  90. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  91. webscout/Provider/OPENAI/PI.py +427 -427
  92. webscout/Provider/OPENAI/README.md +959 -959
  93. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  94. webscout/Provider/OPENAI/TwoAI.py +465 -465
  95. webscout/Provider/OPENAI/__init__.py +33 -18
  96. webscout/Provider/OPENAI/base.py +248 -248
  97. webscout/Provider/OPENAI/chatglm.py +528 -0
  98. webscout/Provider/OPENAI/chatgpt.py +592 -592
  99. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  100. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  101. webscout/Provider/OPENAI/deepinfra.py +318 -314
  102. webscout/Provider/OPENAI/e2b.py +1665 -1665
  103. webscout/Provider/OPENAI/exaai.py +420 -420
  104. webscout/Provider/OPENAI/exachat.py +452 -452
  105. webscout/Provider/OPENAI/friendli.py +232 -232
  106. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  107. webscout/Provider/OPENAI/groq.py +364 -364
  108. webscout/Provider/OPENAI/heckai.py +314 -314
  109. webscout/Provider/OPENAI/llmchatco.py +337 -337
  110. webscout/Provider/OPENAI/netwrck.py +355 -355
  111. webscout/Provider/OPENAI/oivscode.py +290 -290
  112. webscout/Provider/OPENAI/opkfc.py +518 -518
  113. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  114. webscout/Provider/OPENAI/scirachat.py +535 -535
  115. webscout/Provider/OPENAI/sonus.py +308 -308
  116. webscout/Provider/OPENAI/standardinput.py +442 -442
  117. webscout/Provider/OPENAI/textpollinations.py +340 -340
  118. webscout/Provider/OPENAI/toolbaz.py +419 -416
  119. webscout/Provider/OPENAI/typefully.py +362 -362
  120. webscout/Provider/OPENAI/utils.py +295 -295
  121. webscout/Provider/OPENAI/venice.py +436 -436
  122. webscout/Provider/OPENAI/wisecat.py +387 -387
  123. webscout/Provider/OPENAI/writecream.py +166 -166
  124. webscout/Provider/OPENAI/x0gpt.py +378 -378
  125. webscout/Provider/OPENAI/yep.py +389 -389
  126. webscout/Provider/OpenGPT.py +230 -230
  127. webscout/Provider/Openai.py +243 -243
  128. webscout/Provider/PI.py +405 -405
  129. webscout/Provider/Perplexitylabs.py +430 -430
  130. webscout/Provider/QwenLM.py +272 -272
  131. webscout/Provider/STT/__init__.py +16 -1
  132. webscout/Provider/Sambanova.py +257 -257
  133. webscout/Provider/StandardInput.py +309 -309
  134. webscout/Provider/TTI/README.md +82 -82
  135. webscout/Provider/TTI/__init__.py +33 -18
  136. webscout/Provider/TTI/aiarta.py +413 -413
  137. webscout/Provider/TTI/base.py +136 -136
  138. webscout/Provider/TTI/bing.py +243 -243
  139. webscout/Provider/TTI/gpt1image.py +149 -149
  140. webscout/Provider/TTI/imagen.py +196 -196
  141. webscout/Provider/TTI/infip.py +211 -211
  142. webscout/Provider/TTI/magicstudio.py +232 -232
  143. webscout/Provider/TTI/monochat.py +219 -219
  144. webscout/Provider/TTI/piclumen.py +214 -214
  145. webscout/Provider/TTI/pixelmuse.py +232 -232
  146. webscout/Provider/TTI/pollinations.py +232 -232
  147. webscout/Provider/TTI/together.py +288 -288
  148. webscout/Provider/TTI/utils.py +12 -12
  149. webscout/Provider/TTI/venice.py +367 -367
  150. webscout/Provider/TTS/README.md +192 -192
  151. webscout/Provider/TTS/__init__.py +33 -18
  152. webscout/Provider/TTS/parler.py +110 -110
  153. webscout/Provider/TTS/streamElements.py +333 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TeachAnything.py +237 -237
  156. webscout/Provider/TextPollinationsAI.py +310 -310
  157. webscout/Provider/TogetherAI.py +356 -356
  158. webscout/Provider/TwoAI.py +312 -312
  159. webscout/Provider/TypliAI.py +311 -311
  160. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  161. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  162. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  163. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  164. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  165. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  166. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  167. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  168. webscout/Provider/UNFINISHED/liner.py +334 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  170. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  171. webscout/Provider/UNFINISHED/samurai.py +223 -223
  172. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  173. webscout/Provider/Venice.py +250 -250
  174. webscout/Provider/VercelAI.py +256 -256
  175. webscout/Provider/WiseCat.py +231 -231
  176. webscout/Provider/WrDoChat.py +366 -366
  177. webscout/Provider/__init__.py +33 -18
  178. webscout/Provider/ai4chat.py +174 -174
  179. webscout/Provider/akashgpt.py +331 -331
  180. webscout/Provider/cerebras.py +446 -446
  181. webscout/Provider/chatglm.py +394 -301
  182. webscout/Provider/cleeai.py +211 -211
  183. webscout/Provider/elmo.py +282 -282
  184. webscout/Provider/geminiapi.py +208 -208
  185. webscout/Provider/granite.py +261 -261
  186. webscout/Provider/hermes.py +263 -263
  187. webscout/Provider/julius.py +223 -223
  188. webscout/Provider/learnfastai.py +309 -309
  189. webscout/Provider/llama3mitril.py +214 -214
  190. webscout/Provider/llmchat.py +243 -243
  191. webscout/Provider/llmchatco.py +290 -290
  192. webscout/Provider/meta.py +801 -801
  193. webscout/Provider/oivscode.py +309 -309
  194. webscout/Provider/scira_chat.py +383 -383
  195. webscout/Provider/searchchat.py +292 -292
  196. webscout/Provider/sonus.py +258 -258
  197. webscout/Provider/toolbaz.py +370 -367
  198. webscout/Provider/turboseek.py +273 -273
  199. webscout/Provider/typefully.py +207 -207
  200. webscout/Provider/yep.py +372 -372
  201. webscout/__init__.py +30 -31
  202. webscout/__main__.py +5 -5
  203. webscout/auth/api_key_manager.py +189 -189
  204. webscout/auth/config.py +175 -175
  205. webscout/auth/models.py +185 -185
  206. webscout/auth/routes.py +664 -664
  207. webscout/auth/simple_logger.py +236 -236
  208. webscout/cli.py +523 -523
  209. webscout/conversation.py +438 -438
  210. webscout/exceptions.py +361 -361
  211. webscout/litagent/Readme.md +298 -298
  212. webscout/litagent/__init__.py +28 -28
  213. webscout/litagent/agent.py +581 -581
  214. webscout/litagent/constants.py +59 -59
  215. webscout/litprinter/__init__.py +58 -58
  216. webscout/models.py +181 -181
  217. webscout/optimizers.py +419 -419
  218. webscout/prompt_manager.py +288 -288
  219. webscout/sanitize.py +1078 -1078
  220. webscout/scout/README.md +401 -401
  221. webscout/scout/__init__.py +8 -8
  222. webscout/scout/core/__init__.py +6 -6
  223. webscout/scout/core/crawler.py +297 -297
  224. webscout/scout/core/scout.py +706 -706
  225. webscout/scout/core/search_result.py +95 -95
  226. webscout/scout/core/text_analyzer.py +62 -62
  227. webscout/scout/core/text_utils.py +277 -277
  228. webscout/scout/core/web_analyzer.py +51 -51
  229. webscout/scout/element.py +599 -599
  230. webscout/scout/parsers/__init__.py +69 -69
  231. webscout/scout/parsers/html5lib_parser.py +172 -172
  232. webscout/scout/parsers/html_parser.py +236 -236
  233. webscout/scout/parsers/lxml_parser.py +178 -178
  234. webscout/scout/utils.py +37 -37
  235. webscout/swiftcli/Readme.md +323 -323
  236. webscout/swiftcli/__init__.py +95 -95
  237. webscout/swiftcli/core/__init__.py +7 -7
  238. webscout/swiftcli/core/cli.py +308 -308
  239. webscout/swiftcli/core/context.py +104 -104
  240. webscout/swiftcli/core/group.py +241 -241
  241. webscout/swiftcli/decorators/__init__.py +28 -28
  242. webscout/swiftcli/decorators/command.py +221 -221
  243. webscout/swiftcli/decorators/options.py +220 -220
  244. webscout/swiftcli/decorators/output.py +302 -302
  245. webscout/swiftcli/exceptions.py +21 -21
  246. webscout/swiftcli/plugins/__init__.py +9 -9
  247. webscout/swiftcli/plugins/base.py +135 -135
  248. webscout/swiftcli/plugins/manager.py +269 -269
  249. webscout/swiftcli/utils/__init__.py +59 -59
  250. webscout/swiftcli/utils/formatting.py +252 -252
  251. webscout/swiftcli/utils/parsing.py +267 -267
  252. webscout/update_checker.py +117 -117
  253. webscout/version.py +1 -1
  254. webscout/webscout_search.py +1183 -1183
  255. webscout/webscout_search_async.py +649 -649
  256. webscout/yep_search.py +346 -346
  257. webscout/zeroart/README.md +89 -89
  258. webscout/zeroart/__init__.py +134 -134
  259. webscout/zeroart/base.py +66 -66
  260. webscout/zeroart/effects.py +100 -100
  261. webscout/zeroart/fonts.py +1238 -1238
  262. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
  263. webscout-2025.10.11.dist-info/RECORD +300 -0
  264. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  265. webscout/Provider/OPENAI/Qwen3.py +0 -303
  266. webscout/Provider/OPENAI/qodo.py +0 -630
  267. webscout/Provider/OPENAI/xenai.py +0 -514
  268. webscout/tempid.py +0 -134
  269. webscout-8.3.7.dist-info/RECORD +0 -301
  270. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  271. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  272. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  273. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0

webscout/scout/__init__.py
@@ -1,8 +1,8 @@
-"""
-Scout: A powerful, zero-dependency web scraping library
-"""
-
-from .core import Scout, ScoutCrawler, ScoutTextAnalyzer, ScoutWebAnalyzer, ScoutSearchResult
-from .element import Tag, NavigableString
-
-__all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']
+"""
+Scout: A powerful, zero-dependency web scraping library
+"""
+
+from .core import Scout, ScoutCrawler, ScoutTextAnalyzer, ScoutWebAnalyzer, ScoutSearchResult
+from .element import Tag, NavigableString
+
+__all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']

webscout/scout/core/__init__.py
@@ -1,7 +1,7 @@
-from .text_analyzer import ScoutTextAnalyzer
-from .web_analyzer import ScoutWebAnalyzer
-from .search_result import ScoutSearchResult
-from .crawler import ScoutCrawler
-from .scout import Scout
-
+from .text_analyzer import ScoutTextAnalyzer
+from .web_analyzer import ScoutWebAnalyzer
+from .search_result import ScoutSearchResult
+from .crawler import ScoutCrawler
+from .scout import Scout
+
 __all__ = ['ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult', 'ScoutCrawler', 'Scout']
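
For orientation only (this note and the snippet below are not part of the diff): a minimal sketch of how the names re-exported by the two __init__.py files above can be used. The calls mirror those made in crawler.py further down (the constructor, find, find_all, get_text, get); it assumes webscout and the lxml parser are installed, and the HTML string is a made-up example.

from webscout.scout import Scout  # re-exported from webscout.scout.core

html = b"<html><head><title>Example</title></head><body><a href='/docs'>Docs</a></body></html>"
page = Scout(html, features="lxml")           # same constructor call crawler.py uses
title_result = page.find("title")             # find() returns a sequence of matching tags
print(title_result[0].get_text() if title_result else "")
for link in page.find_all("a", href=True):    # iterate anchors, as crawler.py does
    print(link.get("href"))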

webscout/scout/core/crawler.py
@@ -1,297 +1,297 @@
-"""
-Scout Crawler Module - Ultra Advanced Web Crawling System
-"""
-
-import concurrent.futures
-import urllib.parse
-import time
-import hashlib
-import re
-import json
-import sqlite3
-import threading
-import queue
-import logging
-import mimetypes
-import pickle
-import asyncio
-import aiohttp
-import random
-from urllib import robotparser
-from datetime import datetime, timedelta
-from typing import Dict, List, Optional, Union, Set, Tuple, Callable, Any
-from collections import defaultdict, deque
-from dataclasses import dataclass, field
-from enum import Enum
-from pathlib import Path
-
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    LitAgent = None
-
-try:
-    from curl_cffi.requests import Session
-except ImportError:
-    import requests
-    Session = requests.Session
-
-from .scout import Scout
-from .text_analyzer import ScoutTextAnalyzer
-
-
-@dataclass
-class CrawlConfig:
-    """Configuration for the crawler."""
-    max_pages: int = 1000
-    max_depth: int = 10
-    delay: float = 0.5
-    obey_robots: bool = True
-    crawl_subdomains: bool = True
-    max_workers: int = 10
-    timeout: int = 30
-    retry_attempts: int = 3
-    include_external_links: bool = False
-    extract_metadata: bool = True
-    extract_structured_data: bool = True
-    extract_semantic_content: bool = True
-
-
-@dataclass
-class PageData:
-    """Comprehensive page data for LLM training."""
-    url: str
-    title: str
-    text: str
-    clean_text: str
-    markdown_text: str
-    links: List[str]
-    internal_links: List[str]
-    external_links: List[str]
-    metadata: Dict[str, Any]
-    structured_data: Dict[str, Any]
-    semantic_content: Dict[str, Any]
-    headers: Dict[str, str]
-    status_code: int
-    content_type: str
-    language: str
-    timestamp: str
-    depth: int
-    word_count: int
-
-
-class ScoutCrawler:
-    """
-    Ultra-advanced web crawling utility optimized for LLM data collection.
-    """
-    def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: List[str] = None, session: Optional[Session] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
-        """
-        Initialize the web crawler.
-
-        Args:
-            base_url (str): Starting URL to crawl
-            max_pages (int, optional): Maximum number of pages to crawl
-            tags_to_remove (List[str], optional): List of tags to remove
-        """
-        self.base_url = base_url
-        self.max_pages = max_pages
-        self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
-            "script",
-            "style"
-        ]
-        self.visited_urls = set()
-        self.crawled_pages = []
-        self.session = session or Session()
-        self.agent = LitAgent()
-        # Use all headers and generate fingerprint
-        self.session.headers = self.agent.generate_fingerprint()
-        self.session.headers.setdefault("User-Agent", self.agent.chrome())
-        self.delay = delay
-        self.obey_robots = obey_robots
-        # Allow crawling of subdomains by default
-        base_domain = urllib.parse.urlparse(base_url).netloc.split('.')
-        self.base_domain = '.'.join(base_domain[-2:]) if len(base_domain) > 1 else base_domain[0]
-        self.allowed_domains = allowed_domains or [self.base_domain]
-        self.last_request_time = 0
-        self.url_hashes = set()
-        if obey_robots:
-            self.robots = robotparser.RobotFileParser()
-            robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
-            try:
-                self.robots.set_url(robots_url)
-                self.robots.read()
-            except Exception:
-                self.robots = None
-        else:
-            self.robots = None
-
-    def _normalize_url(self, url: str) -> str:
-        url = url.split('#')[0]
-        url = re.sub(r'\?.*$', '', url) # Remove query params
-        return url.rstrip('/')
-
-    def _is_valid_url(self, url: str) -> bool:
-        """
-        Check if a URL is valid and within the same domain.
-
-        Args:
-            url (str): URL to validate
-
-        Returns:
-            bool: Whether the URL is valid
-        """
-        try:
-            parsed_base = urllib.parse.urlparse(self.base_url)
-            parsed_url = urllib.parse.urlparse(url)
-            if parsed_url.scheme not in ["http", "https"]:
-                return False
-            # Allow crawling subdomains
-            if not parsed_url.netloc.endswith(self.base_domain):
-                return False
-            if self.obey_robots and self.robots:
-                return self.robots.can_fetch("*", url)
-            return True
-        except Exception:
-            return False
-
-    def _is_duplicate(self, url: str) -> bool:
-        norm = self._normalize_url(url)
-        url_hash = hashlib.md5(norm.encode()).hexdigest()
-        if url_hash in self.url_hashes:
-            return True
-        self.url_hashes.add(url_hash)
-        return False
-
-    def _extract_main_text(self, soup):
-        # Try to extract main content (simple heuristic)
-        main = soup.find('main')
-        if main:
-            return main.get_text(separator=" ", strip=True)
-        article = soup.find('article')
-        if article:
-            return article.get_text(separator=" ", strip=True)
-        # fallback to body
-        body = soup.find('body')
-        if body:
-            return body.get_text(separator=" ", strip=True)
-        return soup.get_text(separator=" ", strip=True)
-
-    def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Union[str, List[str]]]:
-        """
-        Crawl a single page and extract information.
-
-        Args:
-            url (str): URL to crawl
-            depth (int, optional): Current crawl depth
-
-        Returns:
-            Dict[str, Union[str, List[str]]]: Crawled page information
-        """
-        if url in self.visited_urls or self._is_duplicate(url):
-            return {}
-        # Log URL to crawl
-        print(f"Attempting to crawl URL: {url} (depth: {depth})")
-
-        # Throttle requests
-        now = time.time()
-        if self.last_request_time:
-            elapsed = now - self.last_request_time
-            if elapsed < self.delay:
-                time.sleep(self.delay - elapsed)
-        self.last_request_time = time.time()
-        try:
-            response = self.session.get(url, timeout=10)
-            response.raise_for_status()
-            if not response.headers.get('Content-Type', '').startswith('text/html'):
-                return {}
-            scout = Scout(response.content, features="lxml")
-            title_result = scout.find("title")
-            title = title_result[0].get_text() if title_result else ""
-
-            # Remove only script and style tags before extracting text
-            for tag_name in self.tags_to_remove:
-                for tag in scout._soup.find_all(tag_name):
-                    tag.decompose()
-
-            visible_text = self._extract_main_text(scout._soup)
-
-            # Extract links from header, footer, nav, etc.
-            essential_links = []
-            for essential_tag in ['header', 'nav', 'footer']:
-                elements = scout.find_all(essential_tag)
-                for element in elements:
-                    links = element.find_all('a', href=True)
-                    essential_links.extend(
-                        urllib.parse.urljoin(url, link.get('href'))
-                        for link in links
-                        if link.get('href') and self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
-                    )
-
-            all_links = [
-                urllib.parse.urljoin(url, link.get('href'))
-                for link in scout.find_all('a', href=True)
-                if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
-            ]
-
-            combined_links = list(set(all_links + essential_links))
-
-            page_info = {
-                'url': url,
-                'title': title,
-                'links': combined_links,
-                'text': visible_text,
-                'depth': depth,
-                'timestamp': datetime.utcnow().isoformat(),
-                'headers': dict(response.headers),
-            }
-            self.visited_urls.add(url)
-            self.crawled_pages.append(page_info)
-            return page_info
-        except Exception as e:
-            print(f"Error crawling {url}: {e}")
-            return {}
-
-    def crawl(self):
-        """
-        Start web crawling from base URL and yield each crawled page in real time.
-
-        Yields:
-            Dict[str, Union[str, List[str]]]: Crawled page information
-        """
-        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
-            futures = {executor.submit(self._crawl_page, self.base_url, 0)}
-            submitted_links: set[str] = set()
-
-            while futures:
-                if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
-                    break
-                done, not_done = concurrent.futures.wait(
-                    futures, return_when=concurrent.futures.FIRST_COMPLETED
-                )
-                futures = not_done
-
-                for future in done:
-                    page_info = future.result()
-
-                    if page_info:
-                        yield page_info
-
-                        if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
-                            return
-
-                        for link in page_info.get("links", []):
-                            if (
-                                (self.max_pages is None or len(self.visited_urls) < self.max_pages)
-                                and link not in self.visited_urls
-                                and link not in submitted_links
-                            ):
-                                submitted_links.add(link)
-                                futures.add(
-                                    executor.submit(
-                                        self._crawl_page,
-                                        link,
-                                        page_info.get("depth", 0) + 1,
-                                    )
-                                )
-                    else:
-                        print(f"No page info retrieved from crawling")
+"""
+Scout Crawler Module - Ultra Advanced Web Crawling System
+"""
+
+import concurrent.futures
+import urllib.parse
+import time
+import hashlib
+import re
+import json
+import sqlite3
+import threading
+import queue
+import logging
+import mimetypes
+import pickle
+import asyncio
+import aiohttp
+import random
+from urllib import robotparser
+from datetime import datetime, timedelta
+from typing import Dict, List, Optional, Union, Set, Tuple, Callable, Any
+from collections import defaultdict, deque
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    LitAgent = None
+
+try:
+    from curl_cffi.requests import Session
+except ImportError:
+    import requests
+    Session = requests.Session
+
+from .scout import Scout
+from .text_analyzer import ScoutTextAnalyzer
+
+
+@dataclass
+class CrawlConfig:
+    """Configuration for the crawler."""
+    max_pages: int = 1000
+    max_depth: int = 10
+    delay: float = 0.5
+    obey_robots: bool = True
+    crawl_subdomains: bool = True
+    max_workers: int = 10
+    timeout: int = 30
+    retry_attempts: int = 3
+    include_external_links: bool = False
+    extract_metadata: bool = True
+    extract_structured_data: bool = True
+    extract_semantic_content: bool = True
+
+
+@dataclass
+class PageData:
+    """Comprehensive page data for LLM training."""
+    url: str
+    title: str
+    text: str
+    clean_text: str
+    markdown_text: str
+    links: List[str]
+    internal_links: List[str]
+    external_links: List[str]
+    metadata: Dict[str, Any]
+    structured_data: Dict[str, Any]
+    semantic_content: Dict[str, Any]
+    headers: Dict[str, str]
+    status_code: int
+    content_type: str
+    language: str
+    timestamp: str
+    depth: int
+    word_count: int
+
+
+class ScoutCrawler:
+    """
+    Ultra-advanced web crawling utility optimized for LLM data collection.
+    """
+    def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: List[str] = None, session: Optional[Session] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
+        """
+        Initialize the web crawler.
+
+        Args:
+            base_url (str): Starting URL to crawl
+            max_pages (int, optional): Maximum number of pages to crawl
+            tags_to_remove (List[str], optional): List of tags to remove
+        """
+        self.base_url = base_url
+        self.max_pages = max_pages
+        self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
+            "script",
+            "style"
+        ]
+        self.visited_urls = set()
+        self.crawled_pages = []
+        self.session = session or Session()
+        self.agent = LitAgent()
+        # Use all headers and generate fingerprint
+        self.session.headers = self.agent.generate_fingerprint()
+        self.session.headers.setdefault("User-Agent", self.agent.chrome())
+        self.delay = delay
+        self.obey_robots = obey_robots
+        # Allow crawling of subdomains by default
+        base_domain = urllib.parse.urlparse(base_url).netloc.split('.')
+        self.base_domain = '.'.join(base_domain[-2:]) if len(base_domain) > 1 else base_domain[0]
+        self.allowed_domains = allowed_domains or [self.base_domain]
+        self.last_request_time = 0
+        self.url_hashes = set()
+        if obey_robots:
+            self.robots = robotparser.RobotFileParser()
+            robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
+            try:
+                self.robots.set_url(robots_url)
+                self.robots.read()
+            except Exception:
+                self.robots = None
+        else:
+            self.robots = None
+
+    def _normalize_url(self, url: str) -> str:
+        url = url.split('#')[0]
+        url = re.sub(r'\?.*$', '', url) # Remove query params
+        return url.rstrip('/')
+
+    def _is_valid_url(self, url: str) -> bool:
+        """
+        Check if a URL is valid and within the same domain.
+
+        Args:
+            url (str): URL to validate
+
+        Returns:
+            bool: Whether the URL is valid
+        """
+        try:
+            parsed_base = urllib.parse.urlparse(self.base_url)
+            parsed_url = urllib.parse.urlparse(url)
+            if parsed_url.scheme not in ["http", "https"]:
+                return False
+            # Allow crawling subdomains
+            if not parsed_url.netloc.endswith(self.base_domain):
+                return False
+            if self.obey_robots and self.robots:
+                return self.robots.can_fetch("*", url)
+            return True
+        except Exception:
+            return False
+
+    def _is_duplicate(self, url: str) -> bool:
+        norm = self._normalize_url(url)
+        url_hash = hashlib.md5(norm.encode()).hexdigest()
+        if url_hash in self.url_hashes:
+            return True
+        self.url_hashes.add(url_hash)
+        return False
+
+    def _extract_main_text(self, soup):
+        # Try to extract main content (simple heuristic)
+        main = soup.find('main')
+        if main:
+            return main.get_text(separator=" ", strip=True)
+        article = soup.find('article')
+        if article:
+            return article.get_text(separator=" ", strip=True)
+        # fallback to body
+        body = soup.find('body')
+        if body:
+            return body.get_text(separator=" ", strip=True)
+        return soup.get_text(separator=" ", strip=True)
+
+    def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Union[str, List[str]]]:
+        """
+        Crawl a single page and extract information.
+
+        Args:
+            url (str): URL to crawl
+            depth (int, optional): Current crawl depth
+
+        Returns:
+            Dict[str, Union[str, List[str]]]: Crawled page information
+        """
+        if url in self.visited_urls or self._is_duplicate(url):
+            return {}
+        # Log URL to crawl
+        print(f"Attempting to crawl URL: {url} (depth: {depth})")
+
+        # Throttle requests
+        now = time.time()
+        if self.last_request_time:
+            elapsed = now - self.last_request_time
+            if elapsed < self.delay:
+                time.sleep(self.delay - elapsed)
+        self.last_request_time = time.time()
+        try:
+            response = self.session.get(url, timeout=10)
+            response.raise_for_status()
+            if not response.headers.get('Content-Type', '').startswith('text/html'):
+                return {}
+            scout = Scout(response.content, features="lxml")
+            title_result = scout.find("title")
+            title = title_result[0].get_text() if title_result else ""
+
+            # Remove only script and style tags before extracting text
+            for tag_name in self.tags_to_remove:
+                for tag in scout._soup.find_all(tag_name):
+                    tag.decompose()
+
+            visible_text = self._extract_main_text(scout._soup)
+
+            # Extract links from header, footer, nav, etc.
+            essential_links = []
+            for essential_tag in ['header', 'nav', 'footer']:
+                elements = scout.find_all(essential_tag)
+                for element in elements:
+                    links = element.find_all('a', href=True)
+                    essential_links.extend(
+                        urllib.parse.urljoin(url, link.get('href'))
+                        for link in links
+                        if link.get('href') and self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
+                    )
+
+            all_links = [
+                urllib.parse.urljoin(url, link.get('href'))
+                for link in scout.find_all('a', href=True)
+                if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
+            ]
+
+            combined_links = list(set(all_links + essential_links))
+
+            page_info = {
+                'url': url,
+                'title': title,
+                'links': combined_links,
+                'text': visible_text,
+                'depth': depth,
+                'timestamp': datetime.utcnow().isoformat(),
+                'headers': dict(response.headers),
+            }
+            self.visited_urls.add(url)
+            self.crawled_pages.append(page_info)
+            return page_info
+        except Exception as e:
+            print(f"Error crawling {url}: {e}")
+            return {}
+
+    def crawl(self):
+        """
+        Start web crawling from base URL and yield each crawled page in real time.
+
+        Yields:
+            Dict[str, Union[str, List[str]]]: Crawled page information
+        """
+        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
+            futures = {executor.submit(self._crawl_page, self.base_url, 0)}
+            submitted_links: set[str] = set()
+
+            while futures:
+                if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
+                    break
+                done, not_done = concurrent.futures.wait(
+                    futures, return_when=concurrent.futures.FIRST_COMPLETED
+                )
+                futures = not_done
+
+                for future in done:
+                    page_info = future.result()
+
+                    if page_info:
+                        yield page_info
+
+                        if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
+                            return
+
+                        for link in page_info.get("links", []):
+                            if (
+                                (self.max_pages is None or len(self.visited_urls) < self.max_pages)
+                                and link not in self.visited_urls
+                                and link not in submitted_links
+                            ):
+                                submitted_links.add(link)
+                                futures.add(
+                                    executor.submit(
+                                        self._crawl_page,
+                                        link,
+                                        page_info.get("depth", 0) + 1,
+                                    )
+                                )
+                    else:
+                        print(f"No page info retrieved from crawling")
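
For context (again, not part of the diff): a minimal usage sketch of the ScoutCrawler class shown above, based only on its constructor signature and the keys it stores in each page_info dict; https://example.com is a placeholder URL.

from webscout.scout import ScoutCrawler

# Crawl up to 10 pages starting from the base URL, with the default 0.5 s
# delay and robots.txt handling implemented in __init__ above.
crawler = ScoutCrawler("https://example.com", max_pages=10)

for page in crawler.crawl():   # crawl() yields each page_info dict as it is produced
    print(page["depth"], page["url"], page["title"])
    print(f"  {len(page['links'])} links, {len(page['text'])} characters of text")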