webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's security advisory for more details.

Files changed (323)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,361 @@
1
+ import requests
2
+ import json
3
+ import time
4
+ import uuid
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import curl_cffi for improved request handling
8
+ from curl_cffi.requests import Session
9
+ from curl_cffi import CurlError
10
+
11
+ # Import base classes and utility structures
12
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
13
+ from .utils import (
14
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
15
+ ChatCompletionMessage, CompletionUsage
16
+ )
17
+
18
+ # Attempt to import LitAgent, fallback if not available
19
+ try:
20
+ from webscout.litagent import LitAgent
21
+ except ImportError:
22
+ pass
23
+
24
+ # --- Groq Client ---
25
+
26
class Completions(BaseCompletions):
    """OpenAI-compatible chat-completions interface for the Groq provider.

    Translates ``create(...)`` calls into POSTs against the Groq
    ``/openai/v1/chat/completions`` endpoint via the client's curl_cffi session.
    """

    def __init__(self, client: 'Groq'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a model response for the given chat conversation.

        Mimics ``openai.chat.completions.create``.

        Args:
            model: Groq model identifier (see ``Groq.AVAILABLE_MODELS``).
            messages: Conversation as a list of ``{"role", "content"}`` dicts.
            max_tokens: Upper bound on generated tokens (Groq default kept at 2049).
            stream: If True, return a generator of ``ChatCompletionChunk``;
                otherwise a single ``ChatCompletion``.
            temperature: Optional sampling temperature, forwarded verbatim.
            top_p: Optional nucleus-sampling value, forwarded verbatim.
            **kwargs: Extra OpenAI-style parameters (``frequency_penalty``,
                ``presence_penalty``, ``tools``, ...) merged into the payload.

        Returns:
            ``ChatCompletion`` or a generator of ``ChatCompletionChunk``.

        Raises:
            IOError: If the HTTP request fails or returns a non-200 status.
        """
        payload: Dict[str, Any] = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p

        # Known optional OpenAI-style parameters are popped explicitly so the
        # trailing payload.update(kwargs) does not duplicate them.
        if "frequency_penalty" in kwargs:
            payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
        if "presence_penalty" in kwargs:
            payload["presence_penalty"] = kwargs.pop("presence_penalty")
        if "tools" in kwargs and kwargs["tools"]:
            payload["tools"] = kwargs.pop("tools")

        # Forward any remaining extra parameters verbatim.
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ``ChatCompletionChunk`` objects parsed from the Groq SSE stream.

        NOTE(review): the previous revision built a per-chunk ``usage`` dict
        (with fabricated fallback token counts) into a ``chunk_dict`` that was
        never yielded — ``yield chunk`` returned the original object. That dead
        code has been removed; streamed chunks carry no usage information, as
        before.
        """
        try:
            response = self._client.session.post(
                self._client.base_url,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
                impersonate="chrome110"  # Use impersonate for better compatibility
            )

            if response.status_code != 200:
                raise IOError(
                    f"Groq request failed with status code {response.status_code}: {response.text}"
                )

            for line in response.iter_lines(decode_unicode=True):
                # Skip keep-alives and anything that is not an SSE data line.
                if not line or not line.startswith("data: "):
                    continue
                json_str = line[6:]
                if json_str == "[DONE]":
                    break

                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    # Best-effort streaming: warn and keep consuming the stream.
                    print(f"Warning: Could not decode JSON line: {json_str}")
                    continue

                choice_data = data.get('choices', [{}])[0]
                delta_data = choice_data.get('delta', {})

                delta = ChoiceDelta(
                    content=delta_data.get('content'),
                    role=delta_data.get('role'),
                    tool_calls=delta_data.get('tool_calls')
                )
                choice = Choice(
                    index=choice_data.get('index', 0),
                    delta=delta,
                    finish_reason=choice_data.get('finish_reason'),
                    logprobs=choice_data.get('logprobs')
                )
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get('system_fingerprint')
                )
        except CurlError as e:
            print(f"Error during Groq stream request: {e}")
            raise IOError(f"Groq request failed: {e}") from e
        except Exception as e:
            print(f"Error processing Groq stream: {e}")
            raise

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Perform a blocking request and build a full ``ChatCompletion``.

        Raises:
            IOError: On transport failure or non-200 HTTP status.
        """
        try:
            response = self._client.session.post(
                self._client.base_url,
                json=payload,
                timeout=self._client.timeout,
                impersonate="chrome110"  # Use impersonate for better compatibility
            )

            if response.status_code != 200:
                raise IOError(
                    f"Groq request failed with status code {response.status_code}: {response.text}"
                )

            data = response.json()

            choices_data = data.get('choices', [])
            usage_data = data.get('usage', {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get('message', {})

                # Tool calls are forwarded untouched when the model used tools.
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', ''),
                    tool_calls=message_d.get('tool_calls')
                )
                choices.append(Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                ))

            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )

            return ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                # Prefer the model name the API actually served.
                model=data.get('model', model),
                usage=usage,
            )

        except CurlError as e:
            print(f"Error during Groq non-stream request: {e}")
            raise IOError(f"Groq request failed: {e}") from e
        except Exception as e:
            print(f"Error processing Groq response: {e}")
            raise
231
class Chat(BaseChat):
    """Namespace mirroring the OpenAI client layout (``client.chat.completions``)."""

    def __init__(self, client: 'Groq'):
        # All work is delegated to the Completions helper bound to this client.
        self.completions = Completions(client)
235
class Groq(OpenAICompatibleProvider):
    """OpenAI-compatible client for the Groq chat-completions API.

    Uses a curl_cffi ``Session`` with Chrome impersonation and, when available,
    LitAgent-generated browser fingerprint headers.
    """

    AVAILABLE_MODELS = [
        "distil-whisper-large-v3-en",
        "gemma2-9b-it",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "llama-guard-3-8b",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "whisper-large-v3",
        "whisper-large-v3-turbo",
        "meta-llama/llama-4-scout-17b-16e-instruct",
        "meta-llama/llama-4-maverick-17b-128e-instruct",
        "playai-tts",
        "playai-tts-arabic",
        "qwen-qwq-32b",
        "mistral-saba-24b",
        "qwen-2.5-coder-32b",
        "qwen-2.5-32b",
        "deepseek-r1-distill-qwen-32b",
        "deepseek-r1-distill-llama-70b",
        "llama-3.3-70b-specdec",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-vision-preview",
        "llama-3.2-90b-vision-preview",
        "mixtral-8x7b-32768"
    ]

    def __init__(self, api_key: Optional[str] = None, timeout: Optional[int] = 30, browser: str = "chrome"):
        """
        Args:
            api_key: Groq API key. When omitted, requests are sent without an
                ``Authorization`` header (the API will reject them).
            timeout: Per-request timeout in seconds.
            browser: Browser name passed to LitAgent for fingerprint generation.
        """
        self.timeout = timeout
        self.base_url = "https://api.groq.com/openai/v1/chat/completions"
        self.api_key = api_key

        # Initialize curl_cffi Session
        self.session = Session()

        self.headers = {
            "Content-Type": "application/json",
        }
        if api_key:
            self.headers["Authorization"] = f"Bearer {api_key}"

        # Try to use LitAgent for browser fingerprinting; fall back to a static
        # Chrome UA if LitAgent is missing or fingerprint generation fails.
        # (Catching Exception alone suffices; the previous
        # ``except (NameError, Exception)`` tuple was redundant since
        # NameError is a subclass of Exception.)
        try:
            agent = LitAgent()
            fingerprint = agent.generate_fingerprint(browser)

            self.headers.update({
                "Accept": fingerprint["accept"],
                "Accept-Encoding": "gzip, deflate, br, zstd",
                "Accept-Language": fingerprint["accept_language"],
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Origin": "https://console.groq.com",
                "Pragma": "no-cache",
                "Referer": "https://console.groq.com/",
                "Sec-Fetch-Dest": "empty",
                "Sec-Fetch-Mode": "cors",
                "Sec-Fetch-Site": "same-site",
                "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
                "Sec-CH-UA-Mobile": "?0",
                "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
                "User-Agent": fingerprint["user_agent"],
            })
        except Exception:
            # Fallback to basic headers if LitAgent is not available
            self.headers.update({
                "Accept": "application/json",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "en-US,en;q=0.9",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            })

        # Update session headers
        self.session.headers.update(self.headers)

        # Initialize chat interface
        self.chat = Chat(self)

    @classmethod
    def get_models(cls, api_key: Optional[str] = None):
        """Fetch available models from Groq API.

        Args:
            api_key (str, optional): Groq API key. If not provided, returns default models.

        Returns:
            list: List of available model IDs
        """
        if not api_key:
            return cls.AVAILABLE_MODELS

        try:
            # Use a temporary curl_cffi session for this class method
            temp_session = Session()
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            }

            response = temp_session.get(
                "https://api.groq.com/openai/v1/models",
                headers=headers,
                impersonate="chrome110"  # Use impersonate for fetching
            )

            if response.status_code != 200:
                return cls.AVAILABLE_MODELS

            data = response.json()
            if "data" in data and isinstance(data["data"], list):
                return [model["id"] for model in data["data"]]
            return cls.AVAILABLE_MODELS

        # Any failure (network, JSON, CurlError, ...) falls back to the static
        # list; the former ``except (CurlError, Exception)`` tuple was
        # redundant because Exception already covers CurlError.
        except Exception:
            return cls.AVAILABLE_MODELS

    @property
    def models(self):
        """Return an object with a ``list()`` method, mimicking ``client.models``."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
@@ -0,0 +1,307 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ from typing import List, Dict, Optional, Union, Generator, Any
5
+
6
+ from webscout.litagent import LitAgent
7
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
8
+ from .utils import (
9
+ ChatCompletion,
10
+ ChatCompletionChunk,
11
+ Choice,
12
+ ChatCompletionMessage,
13
+ ChoiceDelta,
14
+ CompletionUsage,
15
+ format_prompt
16
+ )
17
+
18
# ANSI escape codes used for console formatting in warnings/errors below
BOLD = "\033[1m"   # bold text
RED = "\033[91m"   # bright red (error messages)
RESET = "\033[0m"  # reset all attributes
22
+
23
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` implementation for HeckAI.

    HeckAI replies over a server-sent-event stream; the answer text is the
    run of ``data:`` lines between the ``[ANSWER_START]`` and
    ``[ANSWER_DONE]`` control markers.
    """

    def __init__(self, client: 'HeckAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,  # Accepted for OpenAI compatibility; ignored by HeckAI
        stream: bool = False,
        temperature: Optional[float] = None,  # Accepted for OpenAI compatibility; ignored by HeckAI
        top_p: Optional[float] = None,  # Accepted for OpenAI compatibility; ignored by HeckAI
        **kwargs: Any  # Accepted for OpenAI compatibility; ignored by HeckAI
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a model response for the given chat conversation.
        Mimics ``openai.chat.completions.create``.

        Args:
            model: Model id; normalized via ``HeckAI.convert_model_name``.
            messages: OpenAI-style list of ``{"role", "content"}`` dicts.
            stream: When True, return a generator of ``ChatCompletionChunk``;
                otherwise return a complete ``ChatCompletion``.

        Returns:
            ``ChatCompletion`` or a generator of ``ChatCompletionChunk``.
        """
        # HeckAI takes a single "question" string, so flatten the message
        # list into a "User: ...\nAssistant: ..." style conversation prompt.
        question = format_prompt(messages, add_special_tokens=True)

        model = self._client.convert_model_name(model)
        payload = {
            "model": model,
            "question": question,
            "language": self._client.language,
            "sessionId": self._client.session_id,
            "previousQuestion": None,
            "previousAnswer": None,
            "imgUrls": [],
            "superSmartMode": False
        }

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _iter_answer_lines(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
        """POST *payload* and yield each raw answer fragment from the SSE stream.

        Only ``data:`` lines between ``[ANSWER_START]`` and ``[ANSWER_DONE]``
        are yielded; every other bracketed control token is skipped.
        Shared by both the streaming and the non-streaming paths (the
        original duplicated this parsing loop in each).
        """
        response = self._client.session.post(
            self._client.url,
            headers=self._client.headers,
            json=payload,
            stream=True,
            timeout=self._client.timeout
        )
        response.raise_for_status()

        in_answer = False
        for line in response.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue
            data = line[6:]
            if data == "[ANSWER_START]":
                in_answer = True
                continue
            if data == "[ANSWER_DONE]":
                in_answer = False
                continue
            if data.startswith("[") and data.endswith("]"):
                # Any other bracketed control marker — not answer text.
                continue
            if in_answer:
                yield data

    @staticmethod
    def _fix_encoding(text: str) -> str:
        """Repair UTF-8 text mislabelled as latin-1 by the server (e.g. emoji)."""
        try:
            return text.encode('latin1').decode('utf-8')
        except (UnicodeEncodeError, UnicodeDecodeError):
            return text

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield one ``ChatCompletionChunk`` per answer fragment, then a stop chunk."""
        try:
            for data in self._iter_answer_lines(payload):
                delta = ChoiceDelta(content=self._fix_encoding(data))
                choice = Choice(index=0, delta=delta, finish_reason=None)
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                )
            # Final chunk with finish_reason signalling completion.
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
            )
        except requests.exceptions.RequestException as e:
            print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
            raise IOError(f"HeckAI request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Collect the whole answer and return a single ``ChatCompletion``."""
        try:
            answer_lines = list(self._iter_answer_lines(payload))
            full_text = " ".join(x.strip() for x in answer_lines if x.strip())
            full_text = self._fix_encoding(full_text)

            # Rough token estimates (HeckAI does not report usage): ~4 chars/token.
            prompt_tokens = len(payload["question"]) // 4
            completion_tokens = len(full_text) // 4
            total_tokens = prompt_tokens + completion_tokens
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )
            message = ChatCompletionMessage(
                role="assistant",
                content=full_text)
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )
            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )
        except Exception as e:
            print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
            raise IOError(f"HeckAI request failed: {e}") from e
193
+
194
class Chat(BaseChat):
    """Namespace object exposing ``client.chat.completions``."""

    def __init__(self, client: 'HeckAI'):
        # Bind the completions endpoint to the owning client.
        self.completions = Completions(client)
197
+
198
class HeckAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for HeckAI API.

    Usage:
        client = HeckAI()
        response = client.chat.completions.create(
            model="google/gemini-2.0-flash-001",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "google/gemini-2.0-flash-001",
        "deepseek/deepseek-chat",
        "deepseek/deepseek-r1",
        "openai/gpt-4o-mini",
        "openai/gpt-4.1-mini",
        "x-ai/grok-3-mini-beta",
        "meta-llama/llama-4-scout"
    ]

    def __init__(
        self,
        timeout: int = 30,
        language: str = "English"
    ):
        """
        Initialize the HeckAI client.

        Args:
            timeout: Request timeout in seconds.
            language: Language for responses.
        """
        self.timeout = timeout
        self.language = language
        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
        self.session_id = str(uuid.uuid4())

        # Randomized browser-like User-Agent courtesy of LitAgent.
        agent = LitAgent()
        self.headers = {
            'User-Agent': agent.random(),
            'Content-Type': 'application/json',
            'Origin': 'https://heck.ai',
            'Referer': 'https://heck.ai/',
            'Connection': 'keep-alive'
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)

        # Expose the OpenAI-style chat interface.
        self.chat = Chat(self)

    def convert_model_name(self, model: str) -> str:
        """
        Return a supported model id for *model*, defaulting to Gemini.
        """
        if model in self.AVAILABLE_MODELS:
            return model

        # Case-insensitive substring match against the known models.
        needle = model.lower()
        matched = next(
            (candidate for candidate in self.AVAILABLE_MODELS
             if needle in candidate.lower()),
            None,
        )
        if matched is not None:
            return matched

        # No match — warn and fall back to the default model.
        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
        return "google/gemini-2.0-flash-001"

    @property
    def models(self):
        """OpenAI-style handle whose ``.list()`` returns the model catalog."""
        owner = self

        class _ModelList:
            def list(inner_self):
                return type(owner).AVAILABLE_MODELS

        return _ModelList()
277
+
278
# Simple smoke test when executed directly: ping every model once.
if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in HeckAI.AVAILABLE_MODELS:
        try:
            client = HeckAI(timeout=60)
            # Exercise a short system+user conversation end to end.
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": "Say 'Hello' in one word"},
                ],
                stream=False
            )

            has_content = bool(
                response and response.choices and response.choices[0].message.content
            )
            if has_content:
                status = "✓"
                display_text = response.choices[0].message.content.strip()
                # Truncate long replies so the table stays aligned.
                if len(display_text) > 50:
                    display_text = display_text[:50] + "..."
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")