webscout 8.2.7-py3-none-any.whl → 8.2.8-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (323)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,299 @@
+ from os import system
+ from curl_cffi import CurlError
+ from curl_cffi.requests import Session
+ import json
+ import uuid
+ import re
+ from typing import Any, Dict, Optional, Union, List
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class SciraAI(Provider):
+     """
+     A class to interact with the Scira AI chat API.
+     """
+
+     AVAILABLE_MODELS = {
+         "scira-default": "Grok3-mini", # thinking model
+         "scira-grok-3": "Grok3",
+         "scira-anthropic": "Sonnet 3.7 thinking",
+         "scira-vision": "Grok2-Vision", # vision model
+         "scira-4o": "GPT4o",
+         "scira-qwq": "QWQ-32B",
+         "scira-o4-mini": "o4-mini",
+         "scira-google": "gemini 2.5 flash",
+         "scira-google-pro": "gemini 2.5 pro",
+         "scira-llama-4": "llama 4 Maverick",
+     }
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "scira-default",
+         chat_id: str = None,
+         user_id: str = None,
+         browser: str = "chrome",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         """Initializes the Scira AI API client.
+
+         Args:
+             is_conversation (bool): Whether to maintain conversation history.
+             max_tokens (int): Maximum number of tokens to generate.
+             timeout (int): Request timeout in seconds.
+             intro (str): Introduction text for the conversation.
+             filepath (str): Path to save conversation history.
+             update_file (bool): Whether to update the conversation history file.
+             proxies (dict): Proxy configuration for requests.
+             history_offset (int): Maximum history length in characters.
+             act (str): Persona for the AI to adopt.
+             model (str): Model to use, must be one of AVAILABLE_MODELS.
+             chat_id (str): Unique identifier for the chat session.
+             user_id (str): Unique identifier for the user.
+             browser (str): Browser to emulate in requests.
+             system_prompt (str): System prompt for the AI.
+
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://scira.ai/api/search"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+         self.system_prompt = system_prompt
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Origin": "https://scira.ai",
+             "Referer": "https://scira.ai/",
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin"
+         }
+
+         self.session = Session() # Use curl_cffi Session
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies # Assign proxies directly
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.chat_id = chat_id or str(uuid.uuid4())
+         self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
+
+         # Always use chat mode (no web search)
+         self.search_mode = "chat"
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         return self.fingerprint
+
+     @staticmethod
+     def _scira_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from the Scira stream format '0:"..."'."""
+         if isinstance(chunk, str):
+             match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+             if match:
+                 # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                 content = match.group(1).encode().decode('unicode_escape')
+                 return content.replace('\\\\', '\\').replace('\\"', '"')
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]: # Note: Stream parameter removed as API doesn't seem to support it
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
+         ]
+
+         # Prepare the request payload
+         payload = {
+             "id": self.chat_id,
+             "messages": messages,
+             "model": self.model,
+             "group": self.search_mode,
+             "user_id": self.user_id,
+             "timezone": "Asia/Calcutta"
+         }
+
+         try:
+             # Use curl_cffi post with impersonate
+             response = self.session.post(
+                 self.url,
+                 json=payload,
+                 timeout=self.timeout,
+                 impersonate="chrome120" # Add impersonate
+             )
+             if response.status_code != 200:
+                 # Try to get response content for better error messages
+                 try: # Use try-except for reading response content
+                     error_content = response.text
+                 except:
+                     error_content = "<could not read response content>"
+
+                 if response.status_code in [403, 429]:
+                     print(f"Received status code {response.status_code}, refreshing identity...")
+                     self.refresh_identity()
+                     response = self.session.post(
+                         self.url, json=payload, timeout=self.timeout,
+                         impersonate="chrome120" # Add impersonate to retry
+                     )
+                     if not response.ok:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
+                         )
+                     print("Identity refreshed successfully.")
+                 else:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Request failed with status code {response.status_code}. Response: {error_content}"
+                     )
+
+             response_text_raw = response.text # Get raw response text
+
+             # Process the text using sanitize_stream line by line
+             processed_stream = sanitize_stream(
+                 data=response_text_raw.splitlines(), # Split into lines
+                 intro_value=None, # No simple prefix
+                 to_json=False, # Content is not JSON
+                 content_extractor=self._scira_extractor # Use the specific extractor
+             )
+
+             # Aggregate the results from the generator
+             full_response = ""
+             for content in processed_stream:
+                 if content and isinstance(content, str):
+                     full_response += content
+
+             self.last_response = {"text": full_response}
+             self.conversation.update_chat_history(prompt, full_response)
+             return {"text": full_response}
+         except CurlError as e: # Catch CurlError
+             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+     def chat(
+         self,
+         prompt: str,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         return self.get_message(
+             self.ask(
+                 prompt, optimizer=optimizer, conversationally=conversationally
+             )
+         )
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         # Extractor handles formatting
+         return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     print("-" * 100)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 100)
+
+     test_prompt = "Say 'Hello' in one word"
+
+     # Test each model
+     for model in SciraAI.AVAILABLE_MODELS:
+         print(f"\rTesting {model}...", end="")
+
+         try:
+             test_ai = SciraAI(model=model, timeout=120) # Increased timeout
+             response = test_ai.chat(test_prompt)
+
+             if response and len(response.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             error_msg = str(e)
+             # Truncate very long error messages
+             if len(error_msg) > 100:
+                 error_msg = error_msg[:97] + "..."
+             print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
@@ -0,0 +1,243 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ import secrets
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class SCNet(Provider):
+     """
+     Provider for SCNet chatbot API.
+     """
+     AVAILABLE_MODELS = [
+         {"modelId": 2, "name": "Deepseek-r1-7B"},
+         {"modelId": 3, "name": "Deepseek-r1-32B"},
+         {"modelId": 5, "name": "Deepseek-r1-70B"},
+         {"modelId": 7, "name": "QWQ-32B"},
+         {"modelId": 8, "name": "minimax-text-01-456B"},
+         # Add more models here as needed
+     ]
+     MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
+     MODEL_ID_TO_NAME = {m["modelId"]: m["name"] for m in AVAILABLE_MODELS}
+
+     def __init__(
+         self,
+         model: str = "QWQ-32B",
+         is_conversation: bool = True,
+         max_tokens: int = 2048, # Note: max_tokens is not used by this API
+         timeout: int = 30,
+         intro: Optional[str] = ("You are a helpful, advanced LLM assistant. "
+                                 "You must always answer in English, regardless of the user's language. "
+                                 "If the user asks in another language, politely respond in English only. "
+                                 "Be clear, concise, and helpful."),
+         filepath: Optional[str] = None,
+         update_file: bool = True,
+         proxies: Optional[dict] = None,
+         history_offset: int = 0, # Note: history_offset might not be fully effective due to API structure
+         act: Optional[str] = None,
+         system_prompt: str = (
+             "You are a helpful, advanced LLM assistant. "
+             "You must always answer in English, regardless of the user's language. "
+             "If the user asks in another language, politely respond in English only. "
+             "Be clear, concise, and helpful."
+         ),
+     ):
+         if model not in self.MODEL_NAME_TO_ID:
+             raise ValueError(f"Invalid model: {model}. Choose from: {list(self.MODEL_NAME_TO_ID.keys())}")
+         self.model = model
+         self.modelId = self.MODEL_NAME_TO_ID[model]
+         self.system_prompt = system_prompt
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response: Dict[str, Any] = {}
+         self.proxies = proxies or {}
+         self.cookies = {
+             "Token": secrets.token_hex(16), # Keep cookie generation logic
+         }
+         self.headers = {
+             "accept": "text/event-stream",
+             "content-type": "application/json",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
+             "referer": "https://www.scnet.cn/ui/chatbot/temp_1744712663464",
+             "origin": "https://www.scnet.cn",
+             # Add sec-ch-ua headers if needed for impersonation consistency
+         }
+         self.url = "https://www.scnet.cn/acx/chatbot/v1/chat/completion"
+
+         # Update curl_cffi session headers, proxies, and cookies
+         self.session.headers.update(self.headers)
+         self.session.proxies = self.proxies # Assign proxies directly
+         # Set cookies on the session object for curl_cffi
+         for name, value in self.cookies.items():
+             self.session.cookies.set(name, value)
+
+         self.__available_optimizers = (
+             method for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+         self.conversation.history_offset = history_offset
+
+     @staticmethod
+     def _scnet_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from SCNet stream JSON objects."""
+         if isinstance(chunk, dict):
+             return chunk.get("content")
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
+             else:
+                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {list(self.__available_optimizers)}")
+
+         payload = {
+             "conversationId": "",
+             "content": f"SYSTEM: {self.system_prompt} USER: {conversation_prompt}",
+             "thinking": 0,
+             "online": 0,
+             "modelId": self.modelId,
+             "textFile": [],
+             "imageFile": [],
+             "clusterId": ""
+         }
+
+         def for_stream():
+             try:
+                 # Use curl_cffi session post with impersonate
+                 # Cookies are now handled by the session object
+                 response = self.session.post(
+                     self.url,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome120" # Changed impersonation to chrome120
+                 )
+                 response.raise_for_status() # Check for HTTP errors
+
+                 streaming_text = ""
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None), # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True, # Stream sends JSON
+                     skip_markers=["[done]"],
+                     content_extractor=self._scnet_extractor, # Use the specific extractor
+                     yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _scnet_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         yield {"text": content_chunk} if not raw else content_chunk
+                 # Update history and last response after stream finishes
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, streaming_text)
+
+             except CurlError as e: # Catch CurlError
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e: # Catch other potential exceptions (like HTTPError)
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+         def for_non_stream():
+             # Aggregate the stream using the updated for_stream logic
+             text = ""
+             # Ensure raw=False so for_stream yields dicts
+             for chunk_data in for_stream():
+                 if isinstance(chunk_data, dict) and "text" in chunk_data:
+                     text += chunk_data["text"]
+                 # Handle raw string case if raw=True was passed
+                 elif isinstance(chunk_data, str):
+                     text += chunk_data
+             # last_response and history are updated within for_stream
+             # Return the final aggregated response dict or raw string
+             return text if raw else self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=False, # Ensure ask yields dicts
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict) # get_message expects dict
+
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt, stream=False, raw=False, # Ensure ask returns dict
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             return self.get_message(response_data) # get_message expects dict
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     # Ensure curl_cffi is installed
+     print("-" * 80)
+     print(f"{'ModelId':<10} {'Model':<30} {'Status':<10} {'Response'}")
+     print("-" * 80)
+     for model in SCNet.AVAILABLE_MODELS:
+         try:
+             test_ai = SCNet(model=model["name"], timeout=60)
+             # Test stream first
+             response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             print(f"\r{model['modelId']:<10} {model['name']:<30} {'Streaming...':<10}", end="", flush=True)
+             for chunk in response_stream:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip()
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗ (Stream)"
+                 display_text = "Empty or invalid stream response"
+             print(f"\r{model['modelId']:<10} {model['name']:<30} {status:<10} {display_text}")
+
+             # Optional: Add non-stream test if needed
+             # print(f"\r{model['modelId']:<10} {model['name']:<30} {'Non-Stream...':<10}", end="", flush=True)
+             # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+             # if not response_non_stream or len(response_non_stream.strip()) == 0:
+             #     print(f"\r{model['modelId']:<10} {model['name']:<30} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
+         except Exception as e:
+             print(f"\r{model['modelId']:<10} {model['name']:<30} {'✗':<10} {str(e)}")