webscout-8.2.9-py3-none-any.whl → webscout-2026.1.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (413)
  1. webscout/AIauto.py +524 -251
  2. webscout/AIbase.py +247 -319
  3. webscout/AIutel.py +68 -703
  4. webscout/Bard.py +1072 -1026
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +403 -232
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -684
  37. webscout/Extra/tempmail/README.md +487 -487
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +292 -333
  49. webscout/Provider/AISEARCH/README.md +106 -279
  50. webscout/Provider/AISEARCH/__init__.py +16 -9
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +357 -410
  53. webscout/Provider/AISEARCH/monica_search.py +200 -220
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -342
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +467 -340
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +175 -169
  67. webscout/Provider/GithubChat.py +385 -369
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -801
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -375
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -291
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -384
  77. webscout/Provider/Netwrck.py +273 -270
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -952
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -357
  85. webscout/Provider/OPENAI/__init__.py +148 -40
  86. webscout/Provider/OPENAI/ai4chat.py +348 -293
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
  90. webscout/Provider/OPENAI/base.py +253 -249
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +870 -556
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -173
  94. webscout/Provider/OPENAI/deepinfra.py +403 -322
  95. webscout/Provider/OPENAI/e2b.py +2370 -1414
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +452 -417
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -364
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +333 -308
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -335
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +374 -357
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -287
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +305 -304
  115. webscout/Provider/OPENAI/textpollinations.py +370 -339
  116. webscout/Provider/OPENAI/toolbaz.py +375 -413
  117. webscout/Provider/OPENAI/typefully.py +419 -355
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -318
  120. webscout/Provider/OPENAI/wisecat.py +359 -387
  121. webscout/Provider/OPENAI/writecream.py +185 -163
  122. webscout/Provider/OPENAI/x0gpt.py +462 -365
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -429
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -82
  133. webscout/Provider/TTI/__init__.py +37 -7
  134. webscout/Provider/TTI/base.py +147 -64
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -201
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -221
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -11
  141. webscout/Provider/TTS/README.md +186 -192
  142. webscout/Provider/TTS/__init__.py +43 -10
  143. webscout/Provider/TTS/base.py +523 -159
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -129
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -580
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +331 -308
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -475
  158. webscout/Provider/TypliAI.py +311 -305
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -209
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
  161. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
  162. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
  165. webscout/Provider/UNFINISHED/Youchat.py +347 -330
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
  170. webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
  171. webscout/Provider/WiseCat.py +256 -233
  172. webscout/Provider/WrDoChat.py +390 -370
  173. webscout/Provider/__init__.py +115 -174
  174. webscout/Provider/ai4chat.py +181 -174
  175. webscout/Provider/akashgpt.py +330 -335
  176. webscout/Provider/cerebras.py +397 -290
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -283
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -325
  182. webscout/Provider/llama3mitril.py +230 -215
  183. webscout/Provider/llmchat.py +308 -258
  184. webscout/Provider/llmchatco.py +321 -306
  185. webscout/Provider/meta.py +996 -801
  186. webscout/Provider/oivscode.py +332 -309
  187. webscout/Provider/searchchat.py +316 -292
  188. webscout/Provider/sonus.py +264 -258
  189. webscout/Provider/toolbaz.py +359 -353
  190. webscout/Provider/turboseek.py +332 -266
  191. webscout/Provider/typefully.py +262 -202
  192. webscout/Provider/x0gpt.py +332 -299
  193. webscout/__init__.py +31 -39
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -524
  196. webscout/client.py +1497 -70
  197. webscout/conversation.py +140 -436
  198. webscout/exceptions.py +383 -362
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +74 -420
  204. webscout/prompt_manager.py +376 -288
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -404
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -210
  210. webscout/scout/core/scout.py +800 -607
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -478
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -95
  284. webscout/swiftcli/core/__init__.py +7 -7
  285. webscout/swiftcli/core/cli.py +574 -297
  286. webscout/swiftcli/core/context.py +98 -104
  287. webscout/swiftcli/core/group.py +268 -241
  288. webscout/swiftcli/decorators/__init__.py +28 -28
  289. webscout/swiftcli/decorators/command.py +243 -221
  290. webscout/swiftcli/decorators/options.py +247 -220
  291. webscout/swiftcli/decorators/output.py +392 -252
  292. webscout/swiftcli/exceptions.py +21 -21
  293. webscout/swiftcli/plugins/__init__.py +9 -9
  294. webscout/swiftcli/plugins/base.py +134 -135
  295. webscout/swiftcli/plugins/manager.py +269 -269
  296. webscout/swiftcli/utils/__init__.py +58 -59
  297. webscout/swiftcli/utils/formatting.py +251 -252
  298. webscout/swiftcli/utils/parsing.py +368 -267
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -135
  304. webscout/zeroart/base.py +70 -66
  305. webscout/zeroart/effects.py +155 -101
  306. webscout/zeroart/fonts.py +1799 -1239
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
  311. webscout/DWEBS.py +0 -520
  312. webscout/Extra/Act.md +0 -309
  313. webscout/Extra/GitToolkit/gitapi/README.md +0 -110
  314. webscout/Extra/autocoder/__init__.py +0 -9
  315. webscout/Extra/autocoder/autocoder.py +0 -1105
  316. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  317. webscout/Extra/gguf.md +0 -430
  318. webscout/Extra/weather.md +0 -281
  319. webscout/Litlogger/README.md +0 -10
  320. webscout/Litlogger/__init__.py +0 -15
  321. webscout/Litlogger/formats.py +0 -4
  322. webscout/Litlogger/handlers.py +0 -103
  323. webscout/Litlogger/levels.py +0 -13
  324. webscout/Litlogger/logger.py +0 -92
  325. webscout/Provider/AI21.py +0 -177
  326. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  327. webscout/Provider/AISEARCH/felo_search.py +0 -202
  328. webscout/Provider/AISEARCH/genspark_search.py +0 -324
  329. webscout/Provider/AISEARCH/hika_search.py +0 -186
  330. webscout/Provider/AISEARCH/scira_search.py +0 -298
  331. webscout/Provider/Aitopia.py +0 -316
  332. webscout/Provider/AllenAI.py +0 -440
  333. webscout/Provider/Blackboxai.py +0 -791
  334. webscout/Provider/ChatGPTClone.py +0 -237
  335. webscout/Provider/ChatGPTGratis.py +0 -194
  336. webscout/Provider/Cloudflare.py +0 -324
  337. webscout/Provider/ExaChat.py +0 -358
  338. webscout/Provider/Flowith.py +0 -217
  339. webscout/Provider/FreeGemini.py +0 -250
  340. webscout/Provider/Glider.py +0 -225
  341. webscout/Provider/HF_space/__init__.py +0 -0
  342. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  343. webscout/Provider/HuggingFaceChat.py +0 -469
  344. webscout/Provider/Hunyuan.py +0 -283
  345. webscout/Provider/LambdaChat.py +0 -411
  346. webscout/Provider/Llama3.py +0 -259
  347. webscout/Provider/Nemotron.py +0 -218
  348. webscout/Provider/OLLAMA.py +0 -396
  349. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
  350. webscout/Provider/OPENAI/Cloudflare.py +0 -378
  351. webscout/Provider/OPENAI/FreeGemini.py +0 -283
  352. webscout/Provider/OPENAI/NEMOTRON.py +0 -232
  353. webscout/Provider/OPENAI/Qwen3.py +0 -283
  354. webscout/Provider/OPENAI/api.py +0 -969
  355. webscout/Provider/OPENAI/c4ai.py +0 -373
  356. webscout/Provider/OPENAI/chatgptclone.py +0 -494
  357. webscout/Provider/OPENAI/copilot.py +0 -242
  358. webscout/Provider/OPENAI/flowith.py +0 -162
  359. webscout/Provider/OPENAI/freeaichat.py +0 -359
  360. webscout/Provider/OPENAI/mcpcore.py +0 -389
  361. webscout/Provider/OPENAI/multichat.py +0 -376
  362. webscout/Provider/OPENAI/opkfc.py +0 -496
  363. webscout/Provider/OPENAI/scirachat.py +0 -477
  364. webscout/Provider/OPENAI/standardinput.py +0 -433
  365. webscout/Provider/OPENAI/typegpt.py +0 -364
  366. webscout/Provider/OPENAI/uncovrAI.py +0 -463
  367. webscout/Provider/OPENAI/venice.py +0 -431
  368. webscout/Provider/OPENAI/yep.py +0 -382
  369. webscout/Provider/OpenGPT.py +0 -209
  370. webscout/Provider/Perplexitylabs.py +0 -415
  371. webscout/Provider/Reka.py +0 -214
  372. webscout/Provider/StandardInput.py +0 -290
  373. webscout/Provider/TTI/aiarta.py +0 -365
  374. webscout/Provider/TTI/artbit.py +0 -0
  375. webscout/Provider/TTI/fastflux.py +0 -200
  376. webscout/Provider/TTI/piclumen.py +0 -203
  377. webscout/Provider/TTI/pixelmuse.py +0 -225
  378. webscout/Provider/TTS/gesserit.py +0 -128
  379. webscout/Provider/TTS/sthir.py +0 -94
  380. webscout/Provider/TeachAnything.py +0 -229
  381. webscout/Provider/UNFINISHED/puterjs.py +0 -635
  382. webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
  383. webscout/Provider/Venice.py +0 -258
  384. webscout/Provider/VercelAI.py +0 -253
  385. webscout/Provider/Writecream.py +0 -246
  386. webscout/Provider/WritingMate.py +0 -269
  387. webscout/Provider/asksteve.py +0 -220
  388. webscout/Provider/chatglm.py +0 -215
  389. webscout/Provider/copilot.py +0 -425
  390. webscout/Provider/freeaichat.py +0 -285
  391. webscout/Provider/granite.py +0 -235
  392. webscout/Provider/hermes.py +0 -266
  393. webscout/Provider/koala.py +0 -170
  394. webscout/Provider/lmarena.py +0 -198
  395. webscout/Provider/multichat.py +0 -364
  396. webscout/Provider/scira_chat.py +0 -299
  397. webscout/Provider/scnet.py +0 -243
  398. webscout/Provider/talkai.py +0 -194
  399. webscout/Provider/typegpt.py +0 -289
  400. webscout/Provider/uncovr.py +0 -368
  401. webscout/Provider/yep.py +0 -389
  402. webscout/litagent/Readme.md +0 -276
  403. webscout/litprinter/__init__.py +0 -59
  404. webscout/swiftcli/Readme.md +0 -323
  405. webscout/tempid.py +0 -128
  406. webscout/webscout_search.py +0 -1184
  407. webscout/webscout_search_async.py +0 -654
  408. webscout/yep_search.py +0 -347
  409. webscout/zeroart/README.md +0 -89
  410. webscout-8.2.9.dist-info/METADATA +0 -1033
  411. webscout-8.2.9.dist-info/RECORD +0 -289
  412. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
  413. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/scout/__init__.py
@@ -1,8 +1,8 @@
- """
- Scout: A powerful, zero-dependency web scraping library
- """
-
- from .core import Scout, ScoutCrawler, ScoutTextAnalyzer, ScoutWebAnalyzer, ScoutSearchResult
- from .element import Tag, NavigableString
-
- __all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']
+ """
+ Scout: A powerful, zero-dependency web scraping library
+ """
+
+ from .core import Scout, ScoutCrawler, ScoutSearchResult, ScoutTextAnalyzer, ScoutWebAnalyzer
+ from .element import NavigableString, Tag
+
+ __all__ = ['Scout', 'ScoutCrawler', 'Tag', 'NavigableString','ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult']
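
The hunk above only reorders the re-exports (isort-style); the public surface of webscout.scout is identical in both versions. A minimal consumption sketch, assuming webscout is installed (the markup string is a stand-in):

    from webscout.scout import NavigableString, Scout, ScoutCrawler, Tag

    # Scout takes raw markup plus a parser name, mirroring its use in crawler.py below
    page = Scout("<html><head><title>Hi</title></head></html>", features="html.parser")
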
webscout/scout/core/__init__.py
@@ -1,7 +1,7 @@
- from .text_analyzer import ScoutTextAnalyzer
- from .web_analyzer import ScoutWebAnalyzer
- from .search_result import ScoutSearchResult
- from .crawler import ScoutCrawler
- from .scout import Scout
-
- __all__ = ['ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult', 'ScoutCrawler', 'Scout']
+ from .crawler import ScoutCrawler
+ from .scout import Scout
+ from .search_result import ScoutSearchResult
+ from .text_analyzer import ScoutTextAnalyzer
+ from .web_analyzer import ScoutWebAnalyzer
+
+ __all__ = ['ScoutTextAnalyzer', 'ScoutWebAnalyzer', 'ScoutSearchResult', 'ScoutCrawler', 'Scout']
webscout/scout/core/crawler.py
@@ -1,210 +1,330 @@
- """
- Scout Crawler Module
- """
-
- import concurrent.futures
- import urllib.parse
- import time
- import hashlib
- import re
- from urllib import robotparser
- from datetime import datetime
- from typing import Dict, List, Optional, Union
- from webscout.litagent import LitAgent
- from curl_cffi.requests import Session
-
- from .scout import Scout
-
-
- class ScoutCrawler:
-     """
-     Advanced web crawling utility for Scout library.
-     """
-     def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: List[str] = None, session: Optional[Session] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
-         """
-         Initialize the web crawler.
-
-         Args:
-             base_url (str): Starting URL to crawl
-             max_pages (int, optional): Maximum number of pages to crawl
-             tags_to_remove (List[str], optional): List of tags to remove
-         """
-         self.base_url = base_url
-         self.max_pages = max_pages
-         self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
-             "script",
-             "style",
-             "header",
-             "footer",
-             "nav",
-             "aside",
-             "form",
-             "button",
-         ]
-         self.visited_urls = set()
-         self.crawled_pages = []
-         self.session = session or Session()
-         self.agent = LitAgent()
-         # Use all headers and generate fingerprint
-         self.session.headers = self.agent.generate_fingerprint()
-         self.session.headers.setdefault("User-Agent", self.agent.chrome())
-         self.delay = delay
-         self.obey_robots = obey_robots
-         self.allowed_domains = allowed_domains or [urllib.parse.urlparse(base_url).netloc]
-         self.last_request_time = 0
-         self.url_hashes = set()
-         if obey_robots:
-             self.robots = robotparser.RobotFileParser()
-             robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
-             try:
-                 self.robots.set_url(robots_url)
-                 self.robots.read()
-             except Exception:
-                 self.robots = None
-         else:
-             self.robots = None
-
-     def _normalize_url(self, url: str) -> str:
-         url = url.split('#')[0]
-         url = re.sub(r'\?.*$', '', url)  # Remove query params
-         return url.rstrip('/')
-
-     def _is_valid_url(self, url: str) -> bool:
-         """
-         Check if a URL is valid and within the same domain.
-
-         Args:
-             url (str): URL to validate
-
-         Returns:
-             bool: Whether the URL is valid
-         """
-         try:
-             parsed_base = urllib.parse.urlparse(self.base_url)
-             parsed_url = urllib.parse.urlparse(url)
-             if parsed_url.scheme not in ["http", "https"]:
-                 return False
-             if parsed_url.netloc not in self.allowed_domains:
-                 return False
-             if self.obey_robots and self.robots:
-                 return self.robots.can_fetch("*", url)
-             return True
-         except Exception:
-             return False
-
-     def _is_duplicate(self, url: str) -> bool:
-         norm = self._normalize_url(url)
-         url_hash = hashlib.md5(norm.encode()).hexdigest()
-         if url_hash in self.url_hashes:
-             return True
-         self.url_hashes.add(url_hash)
-         return False
-
-     def _extract_main_text(self, soup):
-         # Try to extract main content (simple heuristic)
-         main = soup.find('main')
-         if main:
-             return main.get_text(separator=" ", strip=True)
-         article = soup.find('article')
-         if article:
-             return article.get_text(separator=" ", strip=True)
-         # fallback to body
-         body = soup.find('body')
-         if body:
-             return body.get_text(separator=" ", strip=True)
-         return soup.get_text(separator=" ", strip=True)
-
-     def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Union[str, List[str]]]:
-         """
-         Crawl a single page and extract information.
-
-         Args:
-             url (str): URL to crawl
-             depth (int, optional): Current crawl depth
-
-         Returns:
-             Dict[str, Union[str, List[str]]]: Crawled page information
-         """
-         if url in self.visited_urls or self._is_duplicate(url):
-             return {}
-         # Throttle requests
-         now = time.time()
-         if self.last_request_time:
-             elapsed = now - self.last_request_time
-             if elapsed < self.delay:
-                 time.sleep(self.delay - elapsed)
-         self.last_request_time = time.time()
-         try:
-             response = self.session.get(url, timeout=10)
-             response.raise_for_status()
-             if not response.headers.get('Content-Type', '').startswith('text/html'):
-                 return {}
-             scout = Scout(response.content, features="lxml")
-             title_result = scout.find("title")
-             title = title_result[0].get_text() if title_result else ""
-             for tag_name in self.tags_to_remove:
-                 for tag in scout._soup.find_all(tag_name):
-                     tag.extract()
-             visible_text = self._extract_main_text(scout._soup)
-             page_info = {
-                 'url': url,
-                 'title': title,
-                 'links': [
-                     urllib.parse.urljoin(url, link.get('href'))
-                     for link in scout.find_all('a', href=True)
-                     if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
-                 ],
-                 'text': visible_text,
-                 'depth': depth,
-                 'timestamp': datetime.utcnow().isoformat(),
-                 'headers': dict(response.headers),
-             }
-             self.visited_urls.add(url)
-             self.crawled_pages.append(page_info)
-             return page_info
-         except Exception as e:
-             print(f"Error crawling {url}: {e}")
-             return {}
-
-     def crawl(self):
-         """
-         Start web crawling from base URL and yield each crawled page in real time.
-
-         Yields:
-             Dict[str, Union[str, List[str]]]: Crawled page information
-         """
-         with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
-             futures = {executor.submit(self._crawl_page, self.base_url, 0)}
-             submitted_links: set[str] = set()
-
-             while futures:
-                 if len(self.visited_urls) >= self.max_pages:
-                     break
-                 done, not_done = concurrent.futures.wait(
-                     futures, return_when=concurrent.futures.FIRST_COMPLETED
-                 )
-                 futures = not_done
-
-                 for future in done:
-                     page_info = future.result()
-
-                     if page_info:
-                         yield page_info
-
-                         if len(self.visited_urls) >= self.max_pages:
-                             return
-
-                         for link in page_info.get("links", []):
-                             if (
-                                 len(self.visited_urls) < self.max_pages
-                                 and link not in self.visited_urls
-                                 and link not in submitted_links
-                             ):
-                                 submitted_links.add(link)
-                                 futures.add(
-                                     executor.submit(
-                                         self._crawl_page,
-                                         link,
-                                         page_info.get("depth", 0) + 1,
-                                     )
-                                 )
+ """
+ Scout Crawler Module - Ultra Advanced Web Crawling System
+ """
+
+ import concurrent.futures
+ import hashlib
+ import time
+ import urllib.parse
+ from dataclasses import dataclass
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional, Set, Union
+ from urllib import robotparser
+
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     LitAgent: Any = None
+
+ try:
+     from curl_cffi.requests import Session
+ except ImportError:
+     import requests
+     Session: Any = requests.Session
+
+ from ..parsers import ParserRegistry
+ from .scout import Scout
+
+
+ @dataclass
+ class CrawlConfig:
+     """Configuration for the crawler."""
+     max_pages: int = 1000
+     max_depth: int = 10
+     delay: float = 0.5
+     obey_robots: bool = True
+     crawl_subdomains: bool = True
+     max_workers: int = 10
+     timeout: int = 30
+     retry_attempts: int = 3
+     include_external_links: bool = False
+     extract_metadata: bool = True
+     extract_structured_data: bool = True
+     extract_semantic_content: bool = True
+
+
+ @dataclass
+ class PageData:
+     """Comprehensive page data for LLM training."""
+     url: str
+     title: str
+     text: str
+     clean_text: str
+     markdown_text: str
+     links: List[str]
+     internal_links: List[str]
+     external_links: List[str]
+     metadata: Dict[str, Any]
+     structured_data: Dict[str, Any]
+     semantic_content: Dict[str, Any]
+     headers: Dict[str, str]
+     status_code: int
+     content_type: str
+     language: str
+     timestamp: str
+     depth: int
+     word_count: int
+
+
+ class ScoutCrawler:
+     """
+     Ultra-advanced web crawling utility optimized for LLM data collection.
+     """
+     def __init__(self, base_url: str, max_pages: int = 50, tags_to_remove: Optional[List[str]] = None, session: Optional[Any] = None, delay: float = 0.5, obey_robots: bool = True, allowed_domains: Optional[List[str]] = None):
+         """
+         Initialize the web crawler.
+
+         Args:
+             base_url (str): Starting URL to crawl
+             max_pages (int, optional): Maximum number of pages to crawl
+             tags_to_remove (List[str], optional): List of tags to remove
+         """
+         self.base_url = base_url
+         self.max_pages = max_pages
+         self.tags_to_remove = tags_to_remove if tags_to_remove is not None else [
+             "script",
+             "style"
+         ]
+         self.visited_urls = set()
+         self.crawled_pages = []
+         self.session = session or Session()
+         # LitAgent may not be available in minimal installs - provide a safe fallback
+         if LitAgent is not None:
+             self.agent = LitAgent()
+         else:
+             class _SimpleAgent:
+                 def generate_fingerprint(self) -> Dict[str, str]:
+                     return {"user_agent": "Mozilla/5.0"}
+
+                 def chrome(self) -> str:
+                     return "Mozilla/5.0"
+
+             self.agent = _SimpleAgent()
+
+         # Use fingerprint to update session headers (normalize keys)
+         fingerprint = self.agent.generate_fingerprint()
+         headers: Dict[str, str] = {}
+         if isinstance(fingerprint, dict):
+             for k, v in fingerprint.items():
+                 if k == "user_agent":
+                     headers["User-Agent"] = str(v)
+                 else:
+                     headers[k.replace("_", "-").title()] = str(v)
+         try:
+             self.session.headers.update(headers)
+         except Exception:
+             # Some session implementations may not expose update() directly
+             for hk, hv in headers.items():
+                 try:
+                     self.session.headers[hk] = hv
+                 except Exception:
+                     pass
+
+         # Ensure a User-Agent is always present
+         try:
+             self.session.headers.setdefault("User-Agent", self.agent.chrome())
+         except Exception:
+             pass
+         self.delay = delay
+         self.obey_robots = obey_robots
+         self.features = "lxml" if "lxml" in ParserRegistry.list_parsers() else "html.parser"
+
+         # Secure domain handling
+         parsed_base = urllib.parse.urlparse(base_url)
+         self.base_netloc = parsed_base.netloc
+         base_domain_parts = self.base_netloc.split('.')
+         self.base_domain = '.'.join(base_domain_parts[-2:]) if len(base_domain_parts) > 1 else self.base_netloc
+
+         self.allowed_domains = allowed_domains or [self.base_netloc]
+         self.last_request_time = 0
+         self.url_hashes = set()
+
+         if obey_robots:
+             self.robots = robotparser.RobotFileParser()
+             robots_url = urllib.parse.urljoin(base_url, '/robots.txt')
+             try:
+                 # Use session for robots.txt to respect headers/UA
+                 robots_resp = self.session.get(robots_url, timeout=5)
+                 if robots_resp.status_code == 200:
+                     self.robots.parse(robots_resp.text.splitlines())
+                 else:
+                     self.robots = None
+             except Exception:
+                 self.robots = None
+         else:
+             self.robots = None
+
+     def _normalize_url(self, url: str) -> str:
+         """Normalize URL by removing fragments and trailing slashes."""
+         url = url.split('#')[0]
+         return url.rstrip('/')
+
+     def _is_valid_url(self, url: str) -> bool:
+         """
+         Check if a URL is valid and within allowed domains.
+         """
+         try:
+             parsed_url = urllib.parse.urlparse(url)
+             if parsed_url.scheme not in ["http", "https"]:
+                 return False
+
+             # Secure domain check
+             target_netloc = parsed_url.netloc.lower()
+             is_allowed = False
+             for allowed in self.allowed_domains:
+                 if target_netloc == allowed.lower() or target_netloc.endswith('.' + allowed.lower()):
+                     is_allowed = True
+                     break
+
+             if not is_allowed:
+                 return False
+
+             if self.obey_robots and self.robots:
+                 # Ensure we pass a str user-agent to robotparser.can_fetch
+                 ua = str(self.session.headers.get("User-Agent", "*"))
+                 return self.robots.can_fetch(ua, url)
+             return True
+         except Exception:
+             return False
+
+     def _is_duplicate(self, url: str) -> bool:
+         norm = self._normalize_url(url)
+         url_hash = hashlib.md5(norm.encode()).hexdigest()
+         if url_hash in self.url_hashes:
+             return True
+         self.url_hashes.add(url_hash)
+         return False
+
+     def _extract_main_text(self, soup):
+         # Try to extract main content (simple heuristic)
+         main = soup.find('main')
+         if main:
+             return main.get_text(separator=" ", strip=True)
+         article = soup.find('article')
+         if article:
+             return article.get_text(separator=" ", strip=True)
+         # fallback to body
+         body = soup.find('body')
+         if body:
+             return body.get_text(separator=" ", strip=True)
+         return soup.get_text(separator=" ", strip=True)
+
+     def _crawl_page(self, url: str, depth: int = 0) -> Dict[str, Any]:
+         """
+         Crawl a single page and extract information.
+
+         Args:
+             url (str): URL to crawl
+             depth (int, optional): Current crawl depth
+
+         Returns:
+             Dict[str, Any]: Crawled page information
+         """
+         if url in self.visited_urls or self._is_duplicate(url):
+             return {}
+         # Log URL to crawl
+         print(f"Attempting to crawl URL: {url} (depth: {depth})")
+
+         # Throttle requests
+         now = time.time()
+         if self.last_request_time:
+             elapsed = now - self.last_request_time
+             if elapsed < self.delay:
+                 time.sleep(self.delay - elapsed)
+         self.last_request_time = time.time()
+         try:
+             response = self.session.get(url, timeout=10)
+             response.raise_for_status()
+             if not response.headers.get('Content-Type', '').startswith('text/html'):
+                 return {}
+             scout = Scout(response.content, features=self.features)
+             title_tag = scout.find("title")
+             title = title_tag.get_text() if title_tag else ""
+
+             # Remove only script and style tags before extracting text
+             for tag_name in self.tags_to_remove:
+                 for tag in scout._soup.find_all(tag_name):
+                     tag.decompose()
+
+             visible_text = self._extract_main_text(scout._soup)
+
+             # Extract links from header, footer, nav, etc.
+             essential_links = []
+             for essential_tag in ['header', 'nav', 'footer']:
+                 elements = scout.find_all(essential_tag)
+                 for element in elements:
+                     links = element.find_all('a', href=True)
+                     essential_links.extend(
+                         urllib.parse.urljoin(url, link.get('href'))
+                         for link in links
+                         if link.get('href') and self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
+                     )
+
+             all_links = [
+                 urllib.parse.urljoin(url, link.get('href'))
+                 for link in scout.find_all('a', href=True)
+                 if self._is_valid_url(urllib.parse.urljoin(url, link.get('href')))
+             ]
+
+             combined_links = list(set(all_links + essential_links))
+
+             page_info = {
+                 'url': url,
+                 'title': title,
+                 'links': combined_links,
+                 'text': visible_text,
+                 'depth': depth,
+                 'timestamp': datetime.now().isoformat(),
+                 'headers': dict(response.headers),
+             }
+             self.visited_urls.add(url)
+             self.crawled_pages.append(page_info)
+             return page_info
+         except Exception as e:
+             print(f"Error crawling {url}: {e}")
+             return {}
+
+     def crawl(self):
+         """
+         Start web crawling from base URL and yield each crawled page in real time.
+
+         Yields:
+             Dict[str, Union[str, List[str]]]: Crawled page information
+         """
+         with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
+             futures = {executor.submit(self._crawl_page, self.base_url, 0)}
+             submitted_links: Set[str] = set()
+
+             while futures:
+                 if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
+                     break
+                 done, not_done = concurrent.futures.wait(
+                     futures, return_when=concurrent.futures.FIRST_COMPLETED
+                 )
+                 futures = not_done
+
+                 for future in done:
+                     page_info = future.result()
+
+                     if page_info:
+                         yield page_info
+
+                         if self.max_pages is not None and len(self.visited_urls) >= self.max_pages:
+                             return
+
+                         for link in page_info.get("links", []):
+                             if (
+                                 (self.max_pages is None or len(self.visited_urls) < self.max_pages)
+                                 and link not in self.visited_urls
+                                 and link not in submitted_links
+                             ):
+                                 submitted_links.add(link)
+                                 futures.add(
+                                     executor.submit(
+                                         self._crawl_page,
+                                         link,
+                                         int(page_info.get("depth", 0)) + 1,
+                                     )
+                                 )
+                     else:
+                         print("No page info retrieved from crawling")