webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout may be problematic. See the registry's advisory page for more details.

Files changed (323):
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,289 @@
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
3
+ import json
4
+ from typing import Union, Any, Dict, Generator
5
+
6
+ from webscout.AIutel import Optimizers
7
+ from webscout.AIutel import Conversation
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
9
+ from webscout.AIbase import Provider
10
+ from webscout import exceptions
11
+ from webscout.litagent import LitAgent
12
+
13
class TypeGPT(Provider):
    """
    Provider for the TypeGPT.net chat-completions API.

    Follows the standard webscout ``Provider`` contract: ``ask()`` yields
    dicts (or raw strings) when streaming and returns a dict otherwise;
    ``chat()`` yields plain strings when streaming and returns a plain
    string otherwise.
    """

    # Models confirmed working against chat.typegpt.net (based on testing).
    AVAILABLE_MODELS = [
        "gpt-4o-mini-2024-07-18",
        "chatgpt-4o-latest",
        "deepseek-r1",
        "deepseek-v3",
        "uncensored-r1",
        "Image-Generator",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,  # Reasonable default completion budget
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt-4o-mini-2024-07-18",
        system_prompt: str = "You are a helpful assistant.",
        temperature: float = 0.5,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1,
    ):
        """Initializes the TypeGPT API client.

        Args:
            is_conversation: Keep a running conversation history.
            max_tokens: Maximum tokens to sample per completion.
            timeout: HTTP request timeout in seconds.
            intro: Optional conversation intro text.
            filepath: Optional path used to persist conversation history.
            update_file: Whether to append new turns to ``filepath``.
            proxies: Optional proxy mapping for the HTTP session
                (``None`` — the default — means no proxies).
            history_offset: Truncation offset for conversation history.
            act: Optional AwesomePrompts persona key.
            model: One of ``AVAILABLE_MODELS``.
            system_prompt: System message sent with every request.
            temperature: Sampling temperature forwarded to the API.
            presence_penalty: Presence penalty forwarded to the API.
            frequency_penalty: Frequency penalty forwarded to the API.
            top_p: Nucleus-sampling parameter forwarded to the API.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")

        # curl_cffi session: lets us impersonate a real browser TLS fingerprint.
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p
        self.headers = {
            "authority": "chat.typegpt.net",
            "accept": "application/json, text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://chat.typegpt.net",
            "referer": "https://chat.typegpt.net/",
            "user-agent": LitAgent().random()
        }

        # Materialized as a tuple: a bare generator expression here would be
        # exhausted by its first membership test, silently disabling
        # optimizer validation on every later call.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
        self.conversation.history_offset = history_offset
        # Update curl_cffi session headers and proxies. ``proxies`` defaults
        # to None (not a shared mutable {}) to avoid the mutable-default trap.
        self.session.headers.update(self.headers)
        self.session.proxies = proxies if proxies is not None else {}

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Sends a prompt to the TypeGPT.net API and returns the response.

        Args:
            prompt: User prompt to send.
            stream: If True, return a generator of incremental chunks.
            raw: When streaming, yield bare text chunks instead of dicts.
            optimizer: Optional name of an ``Optimizers`` method to apply.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than only the latest user prompt.

        Returns:
            A dict with a ``"text"`` key, or — when ``stream`` is True —
            a generator of such dicts (or raw strings if ``raw``).

        Raises:
            exceptions.FailedToGenerateResponseError: On network failure,
                an unknown optimizer, or an unparsable response.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "stream": stream,
            "model": self.model,
            "temperature": self.temperature,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "top_p": self.top_p,
            "max_tokens": self.max_tokens_to_sample,
        }

        def for_stream():
            # Stream SSE chunks, yielding incremental text as it arrives.
            try:
                response = self.session.post(
                    self.api_endpoint,
                    headers=self.headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120"
                )
            except CurlError as ce:
                raise exceptions.FailedToGenerateResponseError(
                    f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                ) from ce

            response.raise_for_status()  # Check for HTTP errors first

            streaming_text = ""
            # sanitize_stream strips the "data:" SSE prefix, parses each JSON
            # line, and extracts the delta content; non-JSON lines are skipped.
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),  # byte iterator
                intro_value="data:",
                to_json=True,
                skip_markers=["[DONE]"],
                content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
                yield_raw_on_error=False
            )

            for content_chunk in processed_stream:
                if content_chunk and isinstance(content_chunk, str):
                    streaming_text += content_chunk
                    yield dict(text=content_chunk) if not raw else content_chunk
                    # Keep last_response current for callers inspecting mid-stream.
                    self.last_response = dict(text=streaming_text)

            # Update conversation history only if something was received.
            if streaming_text:
                self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            # Single-shot request; parse the full JSON body.
            try:
                response = self.session.post(
                    self.api_endpoint,
                    headers=self.headers,
                    json=payload,
                    timeout=self.timeout,
                    impersonate="chrome120"
                )
            except CurlError as ce:
                raise exceptions.FailedToGenerateResponseError(
                    f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                ) from ce

            response.raise_for_status()  # Check for HTTP errors

            try:
                response_text = response.text
                # Parse the whole body as one JSON document and extract the
                # assistant message content.
                processed_stream = sanitize_stream(
                    data=response_text,
                    to_json=True,
                    intro_value=None,
                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
                    yield_raw_on_error=False
                )
                content = ""
                for extracted_content in processed_stream:
                    content = extracted_content if isinstance(extracted_content, str) else ""
            except Exception as je:
                # Chain the original cause so debugging keeps the real traceback.
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to decode JSON response: {je} - Response text: {response.text}"
                ) from je

            self.last_response = {"text": content}
            self.conversation.update_chat_history(prompt, content)
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate a response string, or a generator of strings when streaming.

        NOTE: the streaming branch lives in an inner generator. If ``yield``
        appeared directly in this method's body, the whole method would become
        a generator function and ``chat(..., stream=False)`` would return a
        generator instead of a string.
        """
        def stream_messages():
            # ask() yields dicts when streaming (raw=False); unwrap each one.
            for chunk_dict in self.ask(
                prompt, stream=True, raw=False,
                optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(chunk_dict)

        if stream:
            return stream_messages()
        # ask() returns a dict when not streaming.
        response_dict = self.ask(
            prompt, stream=False,
            optimizer=optimizer, conversationally=conversationally
        )
        return self.get_message(response_dict)

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves the message text from a response dict produced by ask().

        Raises:
            TypeError: If ``response`` is not a dict.
        """
        if not isinstance(response, dict):
            # This case should ideally not be reached if ask() behaves as expected.
            raise TypeError(f"Invalid response type: {type(response)}. Expected dict.")
        text = response.get("text", "")
        # Expand literal \uXXXX escapes only for pure-ASCII text: round-tripping
        # non-ASCII str through 'unicode_escape' (a latin-1-based codec) would
        # corrupt it into mojibake rather than raise.
        if "\\u" in text and text.isascii():
            try:
                return text.encode('utf-8').decode('unicode_escape')
            except UnicodeDecodeError:
                return text
        return text
250
if __name__ == "__main__":
    # Smoke-test every advertised model with a short streamed prompt and
    # print a one-line pass/fail report per model.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    working = 0
    total = len(TypeGPT.AVAILABLE_MODELS)

    for model in TypeGPT.AVAILABLE_MODELS:
        try:
            client = TypeGPT(model=model, timeout=60)
            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)

            # Collect streamed chunks and join once at the end.
            pieces = []
            for piece in client.chat("Say 'Hello' in one word", stream=True):
                pieces.append(piece)
            response_text = "".join(pieces)

            stripped = response_text.strip()
            if stripped:
                status = "✓"
                # Truncate long replies so the table stays aligned.
                display_text = stripped if len(stripped) <= 50 else stripped[:50] + "..."
            else:
                status = "✗ (Stream)"
                display_text = "Empty or invalid stream response"
            print(f"\r{model:<50} {status:<10} {display_text}")

        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -0,0 +1,368 @@
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
3
+ import json
4
+ import uuid
5
+ import re
6
+ from typing import Any, Dict, Optional, Generator, Union
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
9
+ from webscout.AIutel import AwesomePrompts
10
+ from webscout.AIbase import Provider
11
+ from webscout import exceptions
12
+ from webscout.litagent import LitAgent
13
+
14
class UncovrAI(Provider):
    """
    Provider for the Uncovr AI chat API (https://uncovr.app).

    Sends chat requests through a curl_cffi ``Session`` with browser
    impersonation and a LitAgent-generated fingerprint, and parses the
    ``0:"..."`` line-oriented stream format via :meth:`_uncovr_extractor`.
    """

    # Models accepted by the free plan; paid-only models are listed below
    # for reference but deliberately excluded.
    AVAILABLE_MODELS = [
        "default",
        "gpt-4o-mini",
        "gemini-2-flash",
        "gemini-2-flash-lite",
        "groq-llama-3-1-8b",
        "o3-mini",
        "deepseek-r1-distill-qwen-32b",
        # The following models are not available in the free plan:
        # "claude-3-7-sonnet",
        # "gpt-4o",
        # "claude-3-5-sonnet-v2",
        # "deepseek-r1-distill-llama-70b",
        # "gemini-2-flash-lite-preview",
        # "qwen-qwq-32b"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "default",
        chat_id: str = None,
        user_id: str = None,
        browser: str = "chrome"
    ):
        """Initializes the Uncovr AI API client.

        Args:
            is_conversation: Keep a rolling conversation history.
            max_tokens: Token budget passed to the Conversation helper.
            timeout: Per-request timeout in seconds.
            intro: Custom system intro (ignored when ``act`` is given).
            filepath: Optional path for persisting conversation history.
            update_file: Whether to write history updates to ``filepath``.
            proxies: Optional proxy mapping for the HTTP session.
            history_offset: Maximum history length kept in the prompt.
            act: AwesomePrompts persona key used as the intro.
            model: One of :data:`AVAILABLE_MODELS`.
            chat_id: Existing chat id; a fresh UUID is generated if omitted.
            user_id: Existing user id; a synthetic one is generated if omitted.
            browser: Browser family used for fingerprinting/impersonation.

        Raises:
            ValueError: If ``model`` is not in :data:`AVAILABLE_MODELS`.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://uncovr.app/api/workflows/chat"

        # Initialize LitAgent for user agent generation and use
        # fingerprinting to create a consistent browser identity.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers.
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://uncovr.app",
            "Referer": "https://uncovr.app/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        # Initialize curl_cffi Session and apply headers/proxies.
        # NOTE: `proxies` defaults to None (not a shared mutable {}) to
        # avoid the mutable-default-argument pitfall.
        self.session = Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies or {})

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

        # Materialize as a tuple: a generator expression would be exhausted
        # by the first `optimizer in ...` membership test, making every
        # subsequent optimizer lookup fail.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the UncovrAI stream format ``0:"..."``.

        Returns the decoded text for matching string chunks, or ``None``
        for anything else (non-strings, control frames, etc.).
        """
        if not isinstance(chunk, str):
            return None
        match = re.match(r'^0:\s*"?(.*?)"?$', chunk)  # Match 0: with optional quotes
        if not match:
            return None
        raw = match.group(1)
        try:
            # Decode potential unicode escapes like \u00e9.
            content = raw.encode().decode('unicode_escape')
        except UnicodeDecodeError:
            # e.g. a chunk ending in a lone backslash; keep the raw text
            # rather than crashing mid-stream.
            content = raw
        # Handle escaped backslashes and quotes.
        return content.replace('\\\\', '\\').replace('\\"', '"')

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint.
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Propagate the refreshed headers to the live session.
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        temperature: int = 32,
        creativity: str = "medium",
        selected_focus: list = None,
        selected_tools: list = None
    ) -> Union[Dict[str, Any], Generator]:
        """Send ``prompt`` to the API.

        Args:
            prompt: User message.
            stream: Yield chunks as they arrive instead of one final dict.
            raw: When streaming, yield raw text chunks instead of dicts.
            optimizer: Name of an Optimizers method to preprocess the prompt.
            conversationally: Apply the optimizer to the full history prompt.
            temperature: Sampling temperature forwarded to the API.
            creativity: Creativity preset forwarded to the API.
            selected_focus: Focus areas; defaults to ``["web"]``.
            selected_tools: Tools; defaults to ``["quick-cards"]``.

        Returns:
            A dict ``{"text": ...}`` or, when streaming, a generator of them.

        Raises:
            exceptions.FailedToGenerateResponseError: On transport or HTTP errors.
        """
        # None defaults avoid shared mutable default-argument state while
        # keeping the historical defaults intact.
        if selected_focus is None:
            selected_focus = ["web"]
        if selected_tools is None:
            selected_tools = ["quick-cards"]

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the request payload.
        payload = {
            "content": conversation_prompt,
            "chatId": self.chat_id,
            "userMessageId": str(uuid.uuid4()),
            "ai_config": {
                "selectedFocus": selected_focus,
                "selectedTools": selected_tools,
                "agentId": "chat",
                "modelId": self.model,
                "temperature": temperature,
                "creativity": creativity
            }
        }

        def for_stream():
            # Streaming path: POST, retry once on 403/429 with a fresh
            # identity, then pipe the byte stream through sanitize_stream.
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate=self.fingerprint.get("browser_type", "chrome110")
                )

                if response.status_code != 200:
                    # Blocked/throttled responses may clear up with a new
                    # browser identity; retry exactly once.
                    if response.status_code in [403, 429]:
                        self.refresh_identity()
                        retry_response = self.session.post(
                            self.url,
                            json=payload,
                            stream=True,
                            timeout=self.timeout,
                            impersonate=self.fingerprint.get("browser_type", "chrome110")
                        )
                        if not retry_response.ok:
                            raise exceptions.FailedToGenerateResponseError(
                                f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
                            )
                        response = retry_response  # Use the successful retry response
                    else:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code} - {response.text}"
                        )

                streaming_text = ""
                # Use sanitize_stream with the custom extractor.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value=None,     # No simple prefix
                    to_json=False,        # Content is not JSON
                    content_extractor=self._uncovr_extractor,
                    yield_raw_on_error=True  # Keep yielding even if extractor fails
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield dict(text=content_chunk) if not raw else content_chunk

                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            # Non-streaming path: same request/retry logic, then the whole
            # body is split into lines and run through the same extractor.
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    timeout=self.timeout,
                    impersonate=self.fingerprint.get("browser_type", "chrome110")
                )

                if response.status_code != 200:
                    if response.status_code in [403, 429]:
                        self.refresh_identity()
                        response = self.session.post(
                            self.url,
                            json=payload,
                            timeout=self.timeout,
                            impersonate=self.fingerprint.get("browser_type", "chrome110")
                        )
                        if not response.ok:
                            raise exceptions.FailedToGenerateResponseError(
                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                            )
                    else:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code} - {response.text}"
                        )

                response_text = response.text

                # Apply the extractor line by line (content is not JSON).
                processed_stream = sanitize_stream(
                    data=response_text.splitlines(),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._uncovr_extractor,
                    yield_raw_on_error=True
                )

                # Aggregate the results from the generator.
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        temperature: int = 32,
        creativity: str = "medium",
        selected_focus: list = None,
        selected_tools: list = None
    ) -> Union[str, Generator[str, None, None]]:
        """Chat with the model and return plain text (or a text generator).

        Note: ``selected_tools`` defaults to ``[]`` here (not ask()'s
        ``["quick-cards"]``), matching the historical behavior of this method.
        """
        if selected_focus is None:
            selected_focus = ["web"]
        if selected_tools is None:
            selected_tools = []

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally,
                temperature=temperature, creativity=creativity,
                selected_focus=selected_focus, selected_tools=selected_tools
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, False, optimizer=optimizer, conversationally=conversationally,
                    temperature=temperature, creativity=creativity,
                    selected_focus=selected_focus, selected_tools=selected_tools
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text field from a response dict produced by ask()."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # Escape handling is done by the stream extractor; only convert any
        # surviving literal "\n" sequences to real newlines.  (A second pass
        # for "\n\n" would be a no-op after this one and was removed.)
        text = response.get("text", "")
        return text.replace('\\n', '\n')
335
if __name__ == "__main__":
    # Quick manual smoke test: run every free-plan model through the
    # non-streaming path first, then the streaming path, printing one
    # status line per model.  (Requires curl_cffi to be installed.)
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in UncovrAI.AVAILABLE_MODELS:
        try:
            provider = UncovrAI(model=model, timeout=60)
            # Non-stream first, since the stream logic builds on it.
            reply = provider.chat("Say 'Hello' in one word", stream=False)

            if not (reply and len(reply.strip()) > 0):
                status = "✗ (Non-Stream)"
                display_text = "Empty or invalid non-stream response"
            else:
                # Non-stream worked; now exercise the streaming path.
                pieces = []
                for piece in provider.chat("Say 'Hi' in one word", stream=True):
                    pieces.append(piece)
                streamed = "".join(pieces)

                if streamed and len(streamed.strip()) > 0:
                    status = "✓"
                    # Strip and drop any undecodable bytes, then truncate.
                    cleaned = streamed.strip().encode('utf-8', errors='ignore').decode('utf-8')
                    display_text = cleaned[:50] + "..." if len(cleaned) > 50 else cleaned
                else:
                    status = "✗ (Stream)"
                    display_text = "Empty or invalid stream response"

            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")