webscout-8.3.7-py3-none-any.whl → webscout-2025.10.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (306)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Andi.py +1 -1
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +237 -236
  67. webscout/Provider/ChatSandbox.py +343 -343
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -208
  70. webscout/Provider/Deepinfra.py +370 -366
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -308
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -164
  76. webscout/Provider/GeminiProxy.py +167 -167
  77. webscout/Provider/GithubChat.py +371 -372
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -383
  80. webscout/Provider/Jadve.py +282 -282
  81. webscout/Provider/K2Think.py +307 -307
  82. webscout/Provider/Koboldai.py +205 -205
  83. webscout/Provider/LambdaChat.py +423 -423
  84. webscout/Provider/Nemotron.py +244 -244
  85. webscout/Provider/Netwrck.py +248 -248
  86. webscout/Provider/OLLAMA.py +395 -395
  87. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  88. webscout/Provider/OPENAI/FalconH1.py +451 -451
  89. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  90. webscout/Provider/OPENAI/K2Think.py +431 -431
  91. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  92. webscout/Provider/OPENAI/PI.py +427 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +465 -465
  96. webscout/Provider/OPENAI/__init__.py +33 -18
  97. webscout/Provider/OPENAI/base.py +248 -248
  98. webscout/Provider/OPENAI/chatglm.py +528 -0
  99. webscout/Provider/OPENAI/chatgpt.py +592 -592
  100. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  101. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  102. webscout/Provider/OPENAI/deepinfra.py +318 -314
  103. webscout/Provider/OPENAI/e2b.py +1665 -1665
  104. webscout/Provider/OPENAI/exaai.py +420 -420
  105. webscout/Provider/OPENAI/exachat.py +452 -452
  106. webscout/Provider/OPENAI/friendli.py +232 -232
  107. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  108. webscout/Provider/OPENAI/groq.py +364 -364
  109. webscout/Provider/OPENAI/heckai.py +314 -314
  110. webscout/Provider/OPENAI/llmchatco.py +337 -337
  111. webscout/Provider/OPENAI/netwrck.py +355 -355
  112. webscout/Provider/OPENAI/oivscode.py +290 -290
  113. webscout/Provider/OPENAI/opkfc.py +518 -518
  114. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  115. webscout/Provider/OPENAI/scirachat.py +535 -535
  116. webscout/Provider/OPENAI/sonus.py +308 -308
  117. webscout/Provider/OPENAI/standardinput.py +442 -442
  118. webscout/Provider/OPENAI/textpollinations.py +340 -340
  119. webscout/Provider/OPENAI/toolbaz.py +419 -416
  120. webscout/Provider/OPENAI/typefully.py +362 -362
  121. webscout/Provider/OPENAI/utils.py +295 -295
  122. webscout/Provider/OPENAI/venice.py +436 -436
  123. webscout/Provider/OPENAI/wisecat.py +387 -387
  124. webscout/Provider/OPENAI/writecream.py +166 -166
  125. webscout/Provider/OPENAI/x0gpt.py +378 -378
  126. webscout/Provider/OPENAI/yep.py +389 -389
  127. webscout/Provider/OpenGPT.py +230 -230
  128. webscout/Provider/Openai.py +243 -243
  129. webscout/Provider/PI.py +405 -405
  130. webscout/Provider/Perplexitylabs.py +430 -430
  131. webscout/Provider/QwenLM.py +272 -272
  132. webscout/Provider/STT/__init__.py +16 -1
  133. webscout/Provider/Sambanova.py +257 -257
  134. webscout/Provider/StandardInput.py +309 -309
  135. webscout/Provider/TTI/README.md +82 -82
  136. webscout/Provider/TTI/__init__.py +33 -18
  137. webscout/Provider/TTI/aiarta.py +413 -413
  138. webscout/Provider/TTI/base.py +136 -136
  139. webscout/Provider/TTI/bing.py +243 -243
  140. webscout/Provider/TTI/gpt1image.py +149 -149
  141. webscout/Provider/TTI/imagen.py +196 -196
  142. webscout/Provider/TTI/infip.py +211 -211
  143. webscout/Provider/TTI/magicstudio.py +232 -232
  144. webscout/Provider/TTI/monochat.py +219 -219
  145. webscout/Provider/TTI/piclumen.py +214 -214
  146. webscout/Provider/TTI/pixelmuse.py +232 -232
  147. webscout/Provider/TTI/pollinations.py +232 -232
  148. webscout/Provider/TTI/together.py +288 -288
  149. webscout/Provider/TTI/utils.py +12 -12
  150. webscout/Provider/TTI/venice.py +367 -367
  151. webscout/Provider/TTS/README.md +192 -192
  152. webscout/Provider/TTS/__init__.py +33 -18
  153. webscout/Provider/TTS/parler.py +110 -110
  154. webscout/Provider/TTS/streamElements.py +333 -333
  155. webscout/Provider/TTS/utils.py +280 -280
  156. webscout/Provider/TeachAnything.py +237 -237
  157. webscout/Provider/TextPollinationsAI.py +310 -310
  158. webscout/Provider/TogetherAI.py +356 -356
  159. webscout/Provider/TwoAI.py +312 -312
  160. webscout/Provider/TypliAI.py +311 -311
  161. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  162. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  163. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  164. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  165. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  166. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  167. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  168. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  169. webscout/Provider/UNFINISHED/liner.py +334 -0
  170. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  171. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  172. webscout/Provider/UNFINISHED/samurai.py +223 -223
  173. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  174. webscout/Provider/Venice.py +250 -250
  175. webscout/Provider/VercelAI.py +256 -256
  176. webscout/Provider/WiseCat.py +231 -231
  177. webscout/Provider/WrDoChat.py +366 -366
  178. webscout/Provider/__init__.py +33 -18
  179. webscout/Provider/ai4chat.py +174 -174
  180. webscout/Provider/akashgpt.py +331 -331
  181. webscout/Provider/cerebras.py +446 -446
  182. webscout/Provider/chatglm.py +394 -301
  183. webscout/Provider/cleeai.py +211 -211
  184. webscout/Provider/elmo.py +282 -282
  185. webscout/Provider/geminiapi.py +208 -208
  186. webscout/Provider/granite.py +261 -261
  187. webscout/Provider/hermes.py +263 -263
  188. webscout/Provider/julius.py +223 -223
  189. webscout/Provider/learnfastai.py +309 -309
  190. webscout/Provider/llama3mitril.py +214 -214
  191. webscout/Provider/llmchat.py +243 -243
  192. webscout/Provider/llmchatco.py +290 -290
  193. webscout/Provider/meta.py +801 -801
  194. webscout/Provider/oivscode.py +309 -309
  195. webscout/Provider/scira_chat.py +383 -383
  196. webscout/Provider/searchchat.py +292 -292
  197. webscout/Provider/sonus.py +258 -258
  198. webscout/Provider/toolbaz.py +370 -367
  199. webscout/Provider/turboseek.py +273 -273
  200. webscout/Provider/typefully.py +207 -207
  201. webscout/Provider/yep.py +372 -372
  202. webscout/__init__.py +27 -31
  203. webscout/__main__.py +5 -5
  204. webscout/auth/api_key_manager.py +189 -189
  205. webscout/auth/config.py +175 -175
  206. webscout/auth/models.py +185 -185
  207. webscout/auth/routes.py +663 -664
  208. webscout/auth/simple_logger.py +236 -236
  209. webscout/cli.py +523 -523
  210. webscout/conversation.py +438 -438
  211. webscout/exceptions.py +361 -361
  212. webscout/litagent/Readme.md +298 -298
  213. webscout/litagent/__init__.py +28 -28
  214. webscout/litagent/agent.py +581 -581
  215. webscout/litagent/constants.py +59 -59
  216. webscout/litprinter/__init__.py +58 -58
  217. webscout/models.py +181 -181
  218. webscout/optimizers.py +419 -419
  219. webscout/prompt_manager.py +288 -288
  220. webscout/sanitize.py +1078 -1078
  221. webscout/scout/README.md +401 -401
  222. webscout/scout/__init__.py +8 -8
  223. webscout/scout/core/__init__.py +6 -6
  224. webscout/scout/core/crawler.py +297 -297
  225. webscout/scout/core/scout.py +706 -706
  226. webscout/scout/core/search_result.py +95 -95
  227. webscout/scout/core/text_analyzer.py +62 -62
  228. webscout/scout/core/text_utils.py +277 -277
  229. webscout/scout/core/web_analyzer.py +51 -51
  230. webscout/scout/element.py +599 -599
  231. webscout/scout/parsers/__init__.py +69 -69
  232. webscout/scout/parsers/html5lib_parser.py +172 -172
  233. webscout/scout/parsers/html_parser.py +236 -236
  234. webscout/scout/parsers/lxml_parser.py +178 -178
  235. webscout/scout/utils.py +37 -37
  236. webscout/search/__init__.py +51 -0
  237. webscout/search/base.py +195 -0
  238. webscout/search/duckduckgo_main.py +54 -0
  239. webscout/search/engines/__init__.py +48 -0
  240. webscout/search/engines/bing.py +84 -0
  241. webscout/search/engines/bing_news.py +52 -0
  242. webscout/search/engines/brave.py +43 -0
  243. webscout/search/engines/duckduckgo/__init__.py +25 -0
  244. webscout/search/engines/duckduckgo/answers.py +78 -0
  245. webscout/search/engines/duckduckgo/base.py +187 -0
  246. webscout/search/engines/duckduckgo/images.py +97 -0
  247. webscout/search/engines/duckduckgo/maps.py +168 -0
  248. webscout/search/engines/duckduckgo/news.py +68 -0
  249. webscout/search/engines/duckduckgo/suggestions.py +21 -0
  250. webscout/search/engines/duckduckgo/text.py +211 -0
  251. webscout/search/engines/duckduckgo/translate.py +47 -0
  252. webscout/search/engines/duckduckgo/videos.py +63 -0
  253. webscout/search/engines/duckduckgo/weather.py +74 -0
  254. webscout/search/engines/mojeek.py +37 -0
  255. webscout/search/engines/wikipedia.py +56 -0
  256. webscout/search/engines/yahoo.py +65 -0
  257. webscout/search/engines/yahoo_news.py +64 -0
  258. webscout/search/engines/yandex.py +43 -0
  259. webscout/search/engines/yep/__init__.py +13 -0
  260. webscout/search/engines/yep/base.py +32 -0
  261. webscout/search/engines/yep/images.py +99 -0
  262. webscout/search/engines/yep/suggestions.py +35 -0
  263. webscout/search/engines/yep/text.py +114 -0
  264. webscout/search/http_client.py +156 -0
  265. webscout/search/results.py +137 -0
  266. webscout/search/yep_main.py +44 -0
  267. webscout/swiftcli/Readme.md +323 -323
  268. webscout/swiftcli/__init__.py +95 -95
  269. webscout/swiftcli/core/__init__.py +7 -7
  270. webscout/swiftcli/core/cli.py +308 -308
  271. webscout/swiftcli/core/context.py +104 -104
  272. webscout/swiftcli/core/group.py +241 -241
  273. webscout/swiftcli/decorators/__init__.py +28 -28
  274. webscout/swiftcli/decorators/command.py +221 -221
  275. webscout/swiftcli/decorators/options.py +220 -220
  276. webscout/swiftcli/decorators/output.py +302 -302
  277. webscout/swiftcli/exceptions.py +21 -21
  278. webscout/swiftcli/plugins/__init__.py +9 -9
  279. webscout/swiftcli/plugins/base.py +135 -135
  280. webscout/swiftcli/plugins/manager.py +269 -269
  281. webscout/swiftcli/utils/__init__.py +59 -59
  282. webscout/swiftcli/utils/formatting.py +252 -252
  283. webscout/swiftcli/utils/parsing.py +267 -267
  284. webscout/update_checker.py +117 -117
  285. webscout/version.py +1 -1
  286. webscout/version.py.bak +2 -0
  287. webscout/zeroart/README.md +89 -89
  288. webscout/zeroart/__init__.py +134 -134
  289. webscout/zeroart/base.py +66 -66
  290. webscout/zeroart/effects.py +100 -100
  291. webscout/zeroart/fonts.py +1238 -1238
  292. {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/METADATA +936 -937
  293. webscout-2025.10.13.dist-info/RECORD +329 -0
  294. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  295. webscout/Provider/OPENAI/Qwen3.py +0 -303
  296. webscout/Provider/OPENAI/qodo.py +0 -630
  297. webscout/Provider/OPENAI/xenai.py +0 -514
  298. webscout/tempid.py +0 -134
  299. webscout/webscout_search.py +0 -1183
  300. webscout/webscout_search_async.py +0 -649
  301. webscout/yep_search.py +0 -346
  302. webscout-8.3.7.dist-info/RECORD +0 -301
  303. {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
  304. {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
  305. {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
  306. {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0

webscout/scout/__init__.py
@@ -1,8 +1,8 @@
- """
- Scout: A powerful, zero-dependency web scraping library
- """
-
- from .core import Scout, ScoutCrawler, ScoutTextAnalyzer, ScoutWebAnalyzer, ScoutSearchResult
- from .element import Tag, NavigableString
-
- __all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']
+ """
+ Scout: A powerful, zero-dependency web scraping library
+ """
+
+ from .core import Scout, ScoutCrawler, ScoutTextAnalyzer, ScoutWebAnalyzer, ScoutSearchResult
+ from .element import Tag, NavigableString
+
+ __all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']
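
This hunk is the entire public surface of the scout subpackage. For orientation (not part of the diff), a minimal usage sketch built only from the names exported in __all__ above and from how crawler.py calls the same API further down; the HTML snippet is an assumption:

from webscout.scout import Scout

# The `features` parser argument and the indexable find() result are both
# visible in crawler.py below (Scout(response.content, features="lxml") and
# title_result[0].get_text()).
html = "<html><head><title>Demo</title></head><body><main>Hello</main></body></html>"
page = Scout(html, features="lxml")
title_result = page.find("title")
print(title_result[0].get_text() if title_result else "")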

webscout/scout/core/__init__.py
@@ -1,7 +1,7 @@
- from .text_analyzer import ScoutTextAnalyzer
- from .web_analyzer import ScoutWebAnalyzer
- from .search_result import ScoutSearchResult
- from .crawler import ScoutCrawler
- from .scout import Scout
-
+ from .text_analyzer import ScoutTextAnalyzer
+ from .web_analyzer import ScoutWebAnalyzer
+ from .search_result import ScoutSearchResult
+ from .crawler import ScoutCrawler
+ from .scout import Scout
+
  __all__ = ['ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult', 'ScoutCrawler', 'Scout']

webscout/scout/core/crawler.py
@@ -1,297 +1,297 @@
- """
- Scout Crawler Module - Ultra Advanced Web Crawling System
- """
-
- import concurrent.futures
- import urllib.parse
- import time
- import hashlib
- import re
- import json
- import sqlite3
- import threading
- import queue
- import logging
- import mimetypes
- import pickle
- import asyncio
- import aiohttp
- import random
- from urllib import robotparser
- from datetime import datetime, timedelta
- from typing import Dict, List, Optional, Union, Set, Tuple, Callable, Any
- from collections import defaultdict, deque
- from dataclasses import dataclass, field
- from enum import Enum
- from pathlib import Path
-
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     LitAgent = None
-
- try:
-     from curl_cffi.requests import Session
- except ImportError:
-     import requests
-     Session = requests.Session
-
- from .scout import Scout
- from .text_analyzer import ScoutTextAnalyzer
-
-
- @dataclass
- class CrawlConfig:
-     """Configuration for the crawler."""
-     max_pages: int = 1000
-     max_depth: int = 10
-     delay: float = 0.5
-     obey_robots: bool = True
-     crawl_subdomains: bool = True
-     max_workers: int = 10
-     timeout: int = 30
-     retry_attempts: int = 3
-     include_external_links: bool = False
-     extract_metadata: bool = True
-     extract_structured_data: bool = True
-     extract_semantic_content: bool = True
-
-
- @dataclass
- class PageData:
-     """Comprehensive page data for LLM training."""
-     url: str
-     title: str
-     text: str
-     clean_text: str
-     markdown_text: str
-     links: List[str]
-     internal_links: List[str]
-     external_links: List[str]
-     metadata: Dict[str, Any]
-     structured_data: Dict[str, Any]
-     semantic_content: Dict[str, Any]
-     headers: Dict[str, str]
-     status_code: int
-     content_type: str
-     language: str
-     timestamp: str
-     depth: int
-     word_count: int
-
-
- class ScoutCrawler:
-     """
-     Ultra-advanced web crawling utility optimized for LLM data collection.
-     """
-     def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: List[str] = None, session: Optional[Session] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
-         """
-         Initialize the web crawler.
-
-         Args:
-             base_url (str): Starting URL to crawl
-             max_pages (int, optional): Maximum number of pages to crawl
-             tags_to_remove (List[str], optional): List of tags to remove
-         """
-         self.base_url = base_url
-         self.max_pages = max_pages
-         self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
-             "script",
-             "style"
-         ]
-         self.visited_urls = set()
-         self.crawled_pages = []
-         self.session = session or Session()
-         self.agent = LitAgent()
-         # Use all headers and generate fingerprint
-         self.session.headers = self.agent.generate_fingerprint()
-         self.session.headers.setdefault("User-Agent", self.agent.chrome())
-         self.delay = delay
-         self.obey_robots = obey_robots
-         # Allow crawling of subdomains by default
-         base_domain = urllib.parse.urlparse(base_url).netloc.split('.')
-         self.base_domain = '.'.join(base_domain[-2:]) if len(base_domain) > 1 else base_domain[0]
-         self.allowed_domains = allowed_domains or [self.base_domain]
-         self.last_request_time = 0
-         self.url_hashes = set()
-         if obey_robots:
-             self.robots = robotparser.RobotFileParser()
-             robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
-             try:
-                 self.robots.set_url(robots_url)
-                 self.robots.read()
-             except Exception:
-                 self.robots = None
-         else:
-             self.robots = None
-
-     def _normalize_url(self, url: str) -> str:
-         url = url.split('#')[0]
-         url = re.sub(r'\?.*$', '', url)  # Remove query params
-         return url.rstrip('/')
-
-     def _is_valid_url(self, url: str) -> bool:
-         """
-         Check if a URL is valid and within the same domain.
-
-         Args:
-             url (str): URL to validate
-
-         Returns:
-             bool: Whether the URL is valid
-         """
-         try:
-             parsed_base = urllib.parse.urlparse(self.base_url)
-             parsed_url = urllib.parse.urlparse(url)
-             if parsed_url.scheme not in ["http", "https"]:
-                 return False
-             # Allow crawling subdomains
-             if not parsed_url.netloc.endswith(self.base_domain):
-                 return False
-             if self.obey_robots and self.robots:
-                 return self.robots.can_fetch("*", url)
-             return True
-         except Exception:
-             return False
-
-     def _is_duplicate(self, url: str) -> bool:
-         norm = self._normalize_url(url)
-         url_hash = hashlib.md5(norm.encode()).hexdigest()
-         if url_hash in self.url_hashes:
-             return True
-         self.url_hashes.add(url_hash)
-         return False
-
-     def _extract_main_text(self, soup):
-         # Try to extract main content (simple heuristic)
-         main = soup.find('main')
-         if main:
-             return main.get_text(separator=" ", strip=True)
-         article = soup.find('article')
-         if article:
-             return article.get_text(separator=" ", strip=True)
-         # fallback to body
-         body = soup.find('body')
-         if body:
-             return body.get_text(separator=" ", strip=True)
-         return soup.get_text(separator=" ", strip=True)
-
-     def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Union[str, List[str]]]:
-         """
-         Crawl a single page and extract information.
-
-         Args:
-             url (str): URL to crawl
-             depth (int, optional): Current crawl depth
-
-         Returns:
-             Dict[str, Union[str, List[str]]]: Crawled page information
-         """
-         if url in self.visited_urls or self._is_duplicate(url):
-             return {}
-         # Log URL to crawl
-         print(f"Attempting to crawl URL: {url} (depth: {depth})")
-
-         # Throttle requests
-         now = time.time()
-         if self.last_request_time:
-             elapsed = now - self.last_request_time
-             if elapsed < self.delay:
-                 time.sleep(self.delay - elapsed)
-         self.last_request_time = time.time()
-         try:
-             response = self.session.get(url, timeout=10)
-             response.raise_for_status()
-             if not response.headers.get('Content-Type', '').startswith('text/html'):
-                 return {}
-             scout = Scout(response.content, features="lxml")
-             title_result = scout.find("title")
-             title = title_result[0].get_text() if title_result else ""
-
-             # Remove only script and style tags before extracting text
-             for tag_name in self.tags_to_remove:
-                 for tag in scout._soup.find_all(tag_name):
-                     tag.decompose()
-
-             visible_text = self._extract_main_text(scout._soup)
-
-             # Extract links from header, footer, nav, etc.
-             essential_links = []
-             for essential_tag in ['header', 'nav', 'footer']:
-                 elements = scout.find_all(essential_tag)
-                 for element in elements:
-                     links = element.find_all('a', href=True)
-                     essential_links.extend(
-                         urllib.parse.urljoin(url, link.get('href'))
-                         for link in links
-                         if link.get('href') and self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
-                     )
-
-             all_links = [
-                 urllib.parse.urljoin(url, link.get('href'))
-                 for link in scout.find_all('a', href=True)
-                 if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
-             ]
-
-             combined_links = list(set(all_links + essential_links))
-
-             page_info = {
-                 'url': url,
-                 'title': title,
-                 'links': combined_links,
-                 'text': visible_text,
-                 'depth': depth,
-                 'timestamp': datetime.utcnow().isoformat(),
-                 'headers': dict(response.headers),
-             }
-             self.visited_urls.add(url)
-             self.crawled_pages.append(page_info)
-             return page_info
-         except Exception as e:
-             print(f"Error crawling {url}: {e}")
-             return {}
-
-     def crawl(self):
-         """
-         Start web crawling from base URL and yield each crawled page in real time.
-
-         Yields:
-             Dict[str, Union[str, List[str]]]: Crawled page information
-         """
-         with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
-             futures = {executor.submit(self._crawl_page, self.base_url, 0)}
-             submitted_links: set[str] = set()
-
-             while futures:
-                 if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
-                     break
-                 done, not_done = concurrent.futures.wait(
-                     futures, return_when=concurrent.futures.FIRST_COMPLETED
-                 )
-                 futures = not_done
-
-                 for future in done:
-                     page_info = future.result()
-
-                     if page_info:
-                         yield page_info
-
-                         if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
-                             return
-
-                         for link in page_info.get("links", []):
-                             if (
-                                 (self.max_pages is None or len(self.visited_urls) < self.max_pages)
-                                 and link not in self.visited_urls
-                                 and link not in submitted_links
-                             ):
-                                 submitted_links.add(link)
-                                 futures.add(
-                                     executor.submit(
-                                         self._crawl_page,
-                                         link,
-                                         page_info.get("depth", 0) + 1,
-                                     )
-                                 )
-                     else:
-                         print(f"No page info retrieved from crawling")
+ """
+ Scout Crawler Module - Ultra Advanced Web Crawling System
+ """
+
+ import concurrent.futures
+ import urllib.parse
+ import time
+ import hashlib
+ import re
+ import json
+ import sqlite3
+ import threading
+ import queue
+ import logging
+ import mimetypes
+ import pickle
+ import asyncio
+ import aiohttp
+ import random
+ from urllib import robotparser
+ from datetime import datetime, timedelta
+ from typing import Dict, List, Optional, Union, Set, Tuple, Callable, Any
+ from collections import defaultdict, deque
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from pathlib import Path
+
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     LitAgent = None
+
+ try:
+     from curl_cffi.requests import Session
+ except ImportError:
+     import requests
+     Session = requests.Session
+
+ from .scout import Scout
+ from .text_analyzer import ScoutTextAnalyzer
+
+
+ @dataclass
+ class CrawlConfig:
+     """Configuration for the crawler."""
+     max_pages: int = 1000
+     max_depth: int = 10
+     delay: float = 0.5
+     obey_robots: bool = True
+     crawl_subdomains: bool = True
+     max_workers: int = 10
+     timeout: int = 30
+     retry_attempts: int = 3
+     include_external_links: bool = False
+     extract_metadata: bool = True
+     extract_structured_data: bool = True
+     extract_semantic_content: bool = True
+
+
+ @dataclass
+ class PageData:
+     """Comprehensive page data for LLM training."""
+     url: str
+     title: str
+     text: str
+     clean_text: str
+     markdown_text: str
+     links: List[str]
+     internal_links: List[str]
+     external_links: List[str]
+     metadata: Dict[str, Any]
+     structured_data: Dict[str, Any]
+     semantic_content: Dict[str, Any]
+     headers: Dict[str, str]
+     status_code: int
+     content_type: str
+     language: str
+     timestamp: str
+     depth: int
+     word_count: int
+
+
+ class ScoutCrawler:
+     """
+     Ultra-advanced web crawling utility optimized for LLM data collection.
+     """
+     def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: List[str] = None, session: Optional[Session] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
+         """
+         Initialize the web crawler.
+
+         Args:
+             base_url (str): Starting URL to crawl
+             max_pages (int, optional): Maximum number of pages to crawl
+             tags_to_remove (List[str], optional): List of tags to remove
+         """
+         self.base_url = base_url
+         self.max_pages = max_pages
+         self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
+             "script",
+             "style"
+         ]
+         self.visited_urls = set()
+         self.crawled_pages = []
+         self.session = session or Session()
+         self.agent = LitAgent()
+         # Use all headers and generate fingerprint
+         self.session.headers = self.agent.generate_fingerprint()
+         self.session.headers.setdefault("User-Agent", self.agent.chrome())
+         self.delay = delay
+         self.obey_robots = obey_robots
+         # Allow crawling of subdomains by default
+         base_domain = urllib.parse.urlparse(base_url).netloc.split('.')
+         self.base_domain = '.'.join(base_domain[-2:]) if len(base_domain) > 1 else base_domain[0]
+         self.allowed_domains = allowed_domains or [self.base_domain]
+         self.last_request_time = 0
+         self.url_hashes = set()
+         if obey_robots:
+             self.robots = robotparser.RobotFileParser()
+             robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
+             try:
+                 self.robots.set_url(robots_url)
+                 self.robots.read()
+             except Exception:
+                 self.robots = None
+         else:
+             self.robots = None
+
+     def _normalize_url(self, url: str) -> str:
+         url = url.split('#')[0]
+         url = re.sub(r'\?.*$', '', url)  # Remove query params
+         return url.rstrip('/')
+
+     def _is_valid_url(self, url: str) -> bool:
+         """
+         Check if a URL is valid and within the same domain.
+
+         Args:
+             url (str): URL to validate
+
+         Returns:
+             bool: Whether the URL is valid
+         """
+         try:
+             parsed_base = urllib.parse.urlparse(self.base_url)
+             parsed_url = urllib.parse.urlparse(url)
+             if parsed_url.scheme not in ["http", "https"]:
+                 return False
+             # Allow crawling subdomains
+             if not parsed_url.netloc.endswith(self.base_domain):
+                 return False
+             if self.obey_robots and self.robots:
+                 return self.robots.can_fetch("*", url)
+             return True
+         except Exception:
+             return False
+
+     def _is_duplicate(self, url: str) -> bool:
+         norm = self._normalize_url(url)
+         url_hash = hashlib.md5(norm.encode()).hexdigest()
+         if url_hash in self.url_hashes:
+             return True
+         self.url_hashes.add(url_hash)
+         return False
+
+     def _extract_main_text(self, soup):
+         # Try to extract main content (simple heuristic)
+         main = soup.find('main')
+         if main:
+             return main.get_text(separator=" ", strip=True)
+         article = soup.find('article')
+         if article:
+             return article.get_text(separator=" ", strip=True)
+         # fallback to body
+         body = soup.find('body')
+         if body:
+             return body.get_text(separator=" ", strip=True)
+         return soup.get_text(separator=" ", strip=True)
+
+     def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Union[str, List[str]]]:
+         """
+         Crawl a single page and extract information.
+
+         Args:
+             url (str): URL to crawl
+             depth (int, optional): Current crawl depth
+
+         Returns:
+             Dict[str, Union[str, List[str]]]: Crawled page information
+         """
+         if url in self.visited_urls or self._is_duplicate(url):
+             return {}
+         # Log URL to crawl
+         print(f"Attempting to crawl URL: {url} (depth: {depth})")
+
+         # Throttle requests
+         now = time.time()
+         if self.last_request_time:
+             elapsed = now - self.last_request_time
+             if elapsed < self.delay:
+                 time.sleep(self.delay - elapsed)
+         self.last_request_time = time.time()
+         try:
+             response = self.session.get(url, timeout=10)
+             response.raise_for_status()
+             if not response.headers.get('Content-Type', '').startswith('text/html'):
+                 return {}
+             scout = Scout(response.content, features="lxml")
+             title_result = scout.find("title")
+             title = title_result[0].get_text() if title_result else ""
+
+             # Remove only script and style tags before extracting text
+             for tag_name in self.tags_to_remove:
+                 for tag in scout._soup.find_all(tag_name):
+                     tag.decompose()
+
+             visible_text = self._extract_main_text(scout._soup)
+
+             # Extract links from header, footer, nav, etc.
+             essential_links = []
+             for essential_tag in ['header', 'nav', 'footer']:
+                 elements = scout.find_all(essential_tag)
+                 for element in elements:
+                     links = element.find_all('a', href=True)
+                     essential_links.extend(
+                         urllib.parse.urljoin(url, link.get('href'))
+                         for link in links
+                         if link.get('href') and self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
+                     )
+
+             all_links = [
+                 urllib.parse.urljoin(url, link.get('href'))
+                 for link in scout.find_all('a', href=True)
+                 if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
+             ]
+
+             combined_links = list(set(all_links + essential_links))
+
+             page_info = {
+                 'url': url,
+                 'title': title,
+                 'links': combined_links,
+                 'text': visible_text,
+                 'depth': depth,
+                 'timestamp': datetime.utcnow().isoformat(),
+                 'headers': dict(response.headers),
+             }
+             self.visited_urls.add(url)
+             self.crawled_pages.append(page_info)
+             return page_info
+         except Exception as e:
+             print(f"Error crawling {url}: {e}")
+             return {}
+
+     def crawl(self):
+         """
+         Start web crawling from base URL and yield each crawled page in real time.
+
+         Yields:
+             Dict[str, Union[str, List[str]]]: Crawled page information
+         """
+         with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
+             futures = {executor.submit(self._crawl_page, self.base_url, 0)}
+             submitted_links: set[str] = set()
+
+             while futures:
+                 if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
+                     break
+                 done, not_done = concurrent.futures.wait(
+                     futures, return_when=concurrent.futures.FIRST_COMPLETED
+                 )
+                 futures = not_done
+
+                 for future in done:
+                     page_info = future.result()
+
+                     if page_info:
+                         yield page_info
+
+                         if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
+                             return
+
+                         for link in page_info.get("links", []):
+                             if (
+                                 (self.max_pages is None or len(self.visited_urls) < self.max_pages)
+                                 and link not in self.visited_urls
+                                 and link not in submitted_links
+                             ):
+                                 submitted_links.add(link)
+                                 futures.add(
+                                     executor.submit(
+                                         self._crawl_page,
+                                         link,
+                                         page_info.get("depth", 0) + 1,
+                                     )
+                                 )
+                     else:
+                         print(f"No page info retrieved from crawling")