webscout-8.2.7-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (323)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/llmchat.py
@@ -0,0 +1,258 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ from typing import Union, Any, Dict, Optional, Generator, List
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent as Lit
+
+ class LLMChat(Provider):
+     """
+     A class to interact with the LLMChat API
+     """
+
+     AVAILABLE_MODELS = [
+         "@cf/meta/llama-3.1-70b-instruct",
+         "@cf/meta/llama-3.1-8b-instruct",
+         "@cf/meta/llama-3.2-3b-instruct",
+         "@cf/meta/llama-3.2-1b-instruct",
+         "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
+         "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+         "@cf/meta/llama-4-scout-17b-16e-instruct",
+         "@cf/mistralai/mistral-small-3.1-24b-instruct",
+         "@cf/google/gemma-3-12b-it",
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "@cf/meta/llama-3.1-70b-instruct",
+         system_prompt: str = "You are a helpful assistant."
+     ):
+         """
+         Initializes the LLMChat API with given parameters.
+         """
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://llmchat.in/inference/stream"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "*/*",
+             "Origin": "https://llmchat.in",
+             "Referer": "https://llmchat.in/"
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies  # Assign proxies directly
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:  # Corrected return type hint
+         """Chat with LLMChat with logging capabilities"""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         url = f"{self.api_endpoint}?model={self.model}"
+         payload = {
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "max_tokens": self.max_tokens_to_sample,
+             "stream": True  # API seems to always stream based on endpoint name
+         }
+
+         def for_stream():
+             full_response = ""  # Initialize outside try block
+             try:
+                 # Use curl_cffi session post with impersonate
+                 response = self.session.post(
+                     url,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"  # Use a common impersonation profile
+                 )
+                 response.raise_for_status()  # Check for HTTP errors
+
+                 # Iterate over bytes and decode manually
+                 for line_bytes in response.iter_lines():
+                     if line_bytes:
+                         try:
+                             line = line_bytes.decode('utf-8')
+                             if line.startswith('data: '):
+                                 data_str = line[6:]
+                                 if data_str == '[DONE]':
+                                     break
+                                 try:
+                                     data = json.loads(data_str)
+                                     if data.get('response'):
+                                         response_text = data['response']
+                                         full_response += response_text
+                                         resp = dict(text=response_text)
+                                         # Yield dict or raw string chunk
+                                         yield resp if not raw else response_text
+                                 except json.JSONDecodeError:
+                                     continue  # Ignore invalid JSON data
+                         except UnicodeDecodeError:
+                             continue  # Ignore decoding errors
+
+                 # Update history after stream finishes
+                 self.last_response = dict(text=full_response)
+                 self.conversation.update_chat_history(
+                     prompt, full_response
+                 )
+
+             except CurlError as e:  # Catch CurlError
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+         def for_non_stream():
+             # Aggregate the stream using the updated for_stream logic
+             full_response_text = ""
+             try:
+                 # Ensure raw=False so for_stream yields dicts
+                 for chunk_data in for_stream():
+                     if isinstance(chunk_data, dict) and "text" in chunk_data:
+                         full_response_text += chunk_data["text"]
+                     # Handle raw string case if raw=True was passed
+                     elif raw and isinstance(chunk_data, str):
+                         full_response_text += chunk_data
+             except Exception as e:
+                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                 if not full_response_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+             # last_response and history are updated within for_stream
+             # Return the final aggregated response dict or raw string
+             return full_response_text if raw else self.last_response
+
+
+         # Since the API endpoint suggests streaming, always call the stream generator.
+         # The non-stream wrapper will handle aggregation if stream=False.
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response with logging capabilities"""
+
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)  # get_message expects dict
+
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=False,  # Ensure ask returns dict
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+             return self.get_message(response_data)  # get_message expects dict
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Retrieves message from response with validation"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     # Ensure curl_cffi is installed
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(LLMChat.AVAILABLE_MODELS)
+
+     for model in LLMChat.AVAILABLE_MODELS:
+         try:
+             test_ai = LLMChat(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/llmchatco.py
@@ -0,0 +1,306 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ import uuid
+ import re
+ from typing import Union, Any, Dict, Optional, Generator, List
+
+ from webscout.AIutel import Optimizers, sanitize_stream  # Import sanitize_stream
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent as Lit
+
+ class LLMChatCo(Provider):
+     """
+     A class to interact with the LLMChat.co API
+     """
+
+     AVAILABLE_MODELS = [
+         "gemini-flash-2.0",  # Default model
+         "llama-4-scout",
+         "gpt-4o-mini",
+         "gpt-4.1-nano",
+
+
+         # "gpt-4.1",
+         # "gpt-4.1-mini",
+         # "o3-mini",
+         # "claude-3-5-sonnet",
+         # "deepseek-r1",
+         # "claude-3-7-sonnet",
+         # "deep", # deep research mode
+         # "pro" # pro research mode
+
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,  # Note: max_tokens is not used by this API
+         timeout: int = 60,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini-flash-2.0",
+         system_prompt: str = "You are a helpful assistant."
+     ):
+         """
+         Initializes the LLMChat.co API with given parameters.
+         """
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://llmchat.co/api/completion"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.thread_id = str(uuid.uuid4())  # Generate a unique thread ID for conversations
+
+         # Create LitAgent instance (keep if needed for other headers)
+         lit_agent = Lit()
+
+         # Headers based on the provided request
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "text/event-stream",
+             "User-Agent": lit_agent.random(),
+             "Accept-Language": "en-US,en;q=0.9",
+             "Origin": "https://llmchat.co",
+             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
+             "DNT": "1",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             # Add sec-ch-ua headers if needed for impersonation consistency
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies  # Assign proxies directly
+         # Store message history for conversation context
+         self.last_assistant_response = ""
+
+     @staticmethod
+     def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts text content from LLMChat.co stream JSON objects."""
+         if isinstance(chunk, dict) and "answer" in chunk:
+             answer = chunk["answer"]
+             # Prefer fullText if available and status is COMPLETED
+             if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                 return answer["fullText"]
+             elif "text" in answer:
+                 return answer["text"]
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,  # Default to stream as the API uses SSE
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+         """Chat with LLMChat.co with streaming capabilities"""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+
+         # Generate a unique ID for this message
+         thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": prompt},
+         ]
+         # Prepare payload for the API request based on observed request format
+         payload = {
+             "mode": self.model,
+             "prompt": prompt,
+             "threadId": self.thread_id,
+             "messages": messages,
+             "mcpConfig": {},
+             "threadItemId": thread_item_id,
+             "parentThreadItemId": "",
+             "webSearch": web_search,
+             "showSuggestions": True
+         }
+
+         def for_stream():
+             full_response = ""  # Initialize outside try block
+             try:
+                 # Use curl_cffi session post with impersonate
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     # headers are set on the session
+                     stream=True,
+                     timeout=self.timeout,
+                     # proxies are set on the session
+                     impersonate="chrome110"  # Use a common impersonation profile
+                 )
+                 response.raise_for_status()  # Check for HTTP errors
+
+                 # Use sanitize_stream
+                 # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
+                 # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True,  # Stream sends JSON
+                     content_extractor=self._llmchatco_extractor,  # Use the specific extractor
+                     yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                 )
+
+                 last_yielded_text = ""
+                 for current_full_text in processed_stream:
+                     # current_full_text is the full text extracted by _llmchatco_extractor
+                     if current_full_text and isinstance(current_full_text, str):
+                         # Calculate the new part of the text
+                         new_text = current_full_text[len(last_yielded_text):]
+                         if new_text:
+                             full_response = current_full_text  # Keep track of the latest full text
+                             last_yielded_text = current_full_text  # Update tracker
+                             resp = dict(text=new_text)
+                             # Yield dict or raw string chunk
+                             yield resp if not raw else new_text
+
+                 # Update history after stream finishes
+                 self.last_response = dict(text=full_response)
+                 self.last_assistant_response = full_response
+                 self.conversation.update_chat_history(
+                     prompt, full_response
+                 )
+
+             except CurlError as e:  # Catch CurlError
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
+
+         def for_non_stream():
+             # Aggregate the stream using the updated for_stream logic
+             full_response_text = ""
+             try:
+                 # Ensure raw=False so for_stream yields dicts
+                 for chunk_data in for_stream():
+                     if isinstance(chunk_data, dict) and "text" in chunk_data:
+                         full_response_text += chunk_data["text"]
+                     # Handle raw string case if raw=True was passed
+                     elif raw and isinstance(chunk_data, str):
+                         full_response_text += chunk_data
+
+             except Exception as e:
+                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                 if not full_response_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+             # last_response and history are updated within for_stream
+             # Return the final aggregated response dict or raw string
+             return full_response_text if raw else self.last_response
+
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response with streaming capabilities"""
+
+         def for_stream_chat():
+             # ask() yields dicts or strings when streaming
+             gen = self.ask(
+                 prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                 optimizer=optimizer, conversationally=conversationally,
+                 web_search=web_search
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)  # get_message expects dict
+
+         def for_non_stream_chat():
+             # ask() returns dict or str when not streaming
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=False,  # Ensure ask returns dict
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 web_search=web_search
+             )
+             return self.get_message(response_data)  # get_message expects dict
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Retrieves message from response with validation"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     # Ensure curl_cffi is installed
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(LLMChatCo.AVAILABLE_MODELS)
+
+     for model in LLMChatCo.AVAILABLE_MODELS:
+         try:
+             test_ai = LLMChatCo(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word")
+             response_text = response
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")