webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281) hide show
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,364 @@
1
+ import requests
2
+ import json
3
+ import time
4
+ import uuid
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import curl_cffi for improved request handling
8
+ from curl_cffi.requests import Session
9
+ from curl_cffi import CurlError
10
+
11
+ # Import base classes and utility structures
12
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
13
+ from .utils import (
14
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
15
+ ChatCompletionMessage, CompletionUsage
16
+ )
17
+
18
+ # Attempt to import LitAgent, fallback if not available
19
+ try:
20
+ from webscout.litagent import LitAgent
21
+ except ImportError:
22
+ pass
23
+
24
+ # --- Groq Client ---
25
+
26
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` interface backed by the Groq API."""

    def __init__(self, client: 'Groq'):
        # Back-reference to the owning client for session / config access.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Args:
            model: Groq model id (see ``Groq.AVAILABLE_MODELS``).
            messages: Conversation as ``{"role": ..., "content": ...}`` dicts.
            max_tokens: Forwarded as-is to the API.
            stream: When True, return a generator of ``ChatCompletionChunk``.
            temperature: Optional sampling temperature (omitted when None).
            top_p: Optional nucleus-sampling value (omitted when None).
            **kwargs: Extra payload fields (``tools``, ``frequency_penalty``,
                ``presence_penalty``, ...) passed through to the API.

        Returns:
            ``ChatCompletion`` for non-streaming calls, otherwise a generator
            of ``ChatCompletionChunk`` objects.
        """
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p

        # Add frequency_penalty and presence_penalty if provided
        if "frequency_penalty" in kwargs:
            payload["frequency_penalty"] = kwargs.pop("frequency_penalty")
        if "presence_penalty" in kwargs:
            payload["presence_penalty"] = kwargs.pop("presence_penalty")

        # Add any tools if provided
        if "tools" in kwargs and kwargs["tools"]:
            payload["tools"] = kwargs.pop("tools")

        # Any remaining kwargs are forwarded verbatim.
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ``ChatCompletionChunk`` objects parsed from the SSE stream.

        NOTE: a previous revision accumulated per-chunk usage counters and a
        ``chunk_dict``/``usage_dict`` pair, but never attached them to the
        yielded chunk — that dead code has been removed; only the chunk
        objects themselves are yielded, exactly as before.
        """
        try:
            response = self._client.session.post(
                self._client.base_url,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
                impersonate="chrome110"  # Use impersonate for better compatibility
            )

            if response.status_code != 200:
                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")

            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue
                # Only SSE data lines carry payloads; skip everything else.
                if not line.startswith("data: "):
                    continue
                json_str = line[6:]
                if json_str == "[DONE]":
                    break

                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    # Malformed line: warn and keep consuming the stream.
                    print(f"Warning: Could not decode JSON line: {json_str}")
                    continue

                choice_data = data.get('choices', [{}])[0]
                delta_data = choice_data.get('delta', {})

                delta = ChoiceDelta(
                    content=delta_data.get('content'),
                    role=delta_data.get('role'),
                    tool_calls=delta_data.get('tool_calls')
                )
                choice = Choice(
                    index=choice_data.get('index', 0),
                    delta=delta,
                    finish_reason=choice_data.get('finish_reason'),
                    logprobs=choice_data.get('logprobs')
                )
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get('system_fingerprint')
                )
        except CurlError as e:
            print(f"Error during Groq stream request: {e}")
            raise IOError(f"Groq request failed: {e}") from e
        except Exception as e:
            print(f"Error processing Groq stream: {e}")
            raise

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Perform a blocking request and assemble a full ``ChatCompletion``."""
        try:
            response = self._client.session.post(
                self._client.base_url,
                json=payload,
                timeout=self._client.timeout,
                impersonate="chrome110"  # Use impersonate for better compatibility
            )

            if response.status_code != 200:
                raise IOError(f"Groq request failed with status code {response.status_code}: {response.text}")

            data = response.json()

            choices_data = data.get('choices', [])
            usage_data = data.get('usage', {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get('message', {})

                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', ''),
                    # Present only for tool-using responses; None otherwise.
                    tool_calls=message_d.get('tool_calls')
                )
                choices.append(Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                ))

            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )

            return ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get('model', model),
                usage=usage,
            )

        except CurlError as e:
            print(f"Error during Groq non-stream request: {e}")
            raise IOError(f"Groq request failed: {e}") from e
        except Exception as e:
            print(f"Error processing Groq response: {e}")
            raise
234
class Chat(BaseChat):
    """Namespace exposing ``client.chat.completions`` (OpenAI SDK layout)."""

    def __init__(self, client: 'Groq'):
        # The completions object holds the only real logic.
        self.completions = Completions(client)
238
class Groq(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the Groq chat-completions API.

    Usage:
        client = Groq(api_key="...")
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    # Static fallback list, used when no API key is given or the live
    # model listing cannot be fetched.
    AVAILABLE_MODELS = [
        "distil-whisper-large-v3-en",
        "gemma2-9b-it",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "llama-guard-3-8b",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "whisper-large-v3",
        "whisper-large-v3-turbo",
        "meta-llama/llama-4-scout-17b-16e-instruct",
        "meta-llama/llama-4-maverick-17b-128e-instruct",
        "playai-tts",
        "playai-tts-arabic",
        "qwen-qwq-32b",
        "mistral-saba-24b",
        "qwen-2.5-coder-32b",
        "qwen-2.5-32b",
        "deepseek-r1-distill-qwen-32b",
        "deepseek-r1-distill-llama-70b",
        "llama-3.3-70b-specdec",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-vision-preview",
        "llama-3.2-90b-vision-preview",
        "mixtral-8x7b-32768"
    ]

    def __init__(self, api_key: Optional[str] = None, timeout: Optional[int] = 30, browser: str = "chrome"):
        """
        Initialize the Groq client.

        Args:
            api_key: Groq API key; sent as a Bearer token when provided.
            timeout: Request timeout in seconds.
            browser: Browser name used for LitAgent fingerprinting.
        """
        self.timeout = timeout
        self.base_url = "https://api.groq.com/openai/v1/chat/completions"
        self.api_key = api_key

        # Initialize curl_cffi Session
        self.session = Session()

        # Set up headers with API key if provided
        self.headers = {
            "Content-Type": "application/json",
        }

        if api_key:
            self.headers["Authorization"] = f"Bearer {api_key}"

        # Try to use LitAgent for browser fingerprinting
        try:
            agent = LitAgent()
            fingerprint = agent.generate_fingerprint(browser)

            self.headers.update({
                "Accept": fingerprint["accept"],
                "Accept-Encoding": "gzip, deflate, br, zstd",
                "Accept-Language": fingerprint["accept_language"],
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Origin": "https://console.groq.com",
                "Pragma": "no-cache",
                "Referer": "https://console.groq.com/",
                "Sec-Fetch-Dest": "empty",
                "Sec-Fetch-Mode": "cors",
                "Sec-Fetch-Site": "same-site",
                "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
                "Sec-CH-UA-Mobile": "?0",
                "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
                "User-Agent": fingerprint["user_agent"],
            })
        except Exception:
            # LitAgent may be unavailable (import is guarded at module top) or
            # fingerprinting may fail; fall back to static browser-like headers.
            # (The previous `except (NameError, Exception)` tuple was redundant:
            # Exception already covers NameError.)
            self.headers.update({
                "Accept": "application/json",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "en-US,en;q=0.9",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            })

        # Update session headers
        self.session.headers.update(self.headers)

        # Initialize chat interface
        self.chat = Chat(self)

    @classmethod
    def get_models(cls, api_key: Optional[str] = None):
        """Fetch available models from Groq API.

        Args:
            api_key (str, optional): Groq API key. If not provided, returns default models.

        Returns:
            list: List of available model IDs
        """
        if not api_key:
            return cls.AVAILABLE_MODELS

        try:
            # Use a temporary curl_cffi session for this class method
            temp_session = Session()
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            }

            response = temp_session.get(
                "https://api.groq.com/openai/v1/models",
                headers=headers,
                impersonate="chrome110"  # Use impersonate for fetching
            )

            if response.status_code != 200:
                return cls.AVAILABLE_MODELS

            data = response.json()
            if "data" in data and isinstance(data["data"], list):
                return [model["id"] for model in data["data"]]
            return cls.AVAILABLE_MODELS

        except Exception:
            # Fallback to default models list if fetching fails.
            # (CurlError is a subclass of Exception, so listing it separately
            # was redundant.)
            return cls.AVAILABLE_MODELS

    @property
    def models(self):
        """Return an object with a ``.list()`` method, mirroring the OpenAI SDK."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
@@ -0,0 +1,308 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ from typing import List, Dict, Optional, Union, Generator, Any
5
+
6
+ from webscout.litagent import LitAgent
7
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
8
+ from .utils import (
9
+ ChatCompletion,
10
+ ChatCompletionChunk,
11
+ Choice,
12
+ ChatCompletionMessage,
13
+ ChoiceDelta,
14
+ CompletionUsage,
15
+ format_prompt,
16
+ count_tokens
17
+ )
18
+
19
# ANSI escape codes for formatting console output
BOLD = "\033[1m"    # bold text (warnings)
RED = "\033[91m"    # bright red (error messages)
RESET = "\033[0m"   # reset all terminal attributes
+
24
class Completions(BaseCompletions):
    """``chat.completions`` implementation for the HeckAI backend.

    HeckAI exposes a single "question" endpoint rather than an OpenAI-style
    message list, so the incoming messages are flattened into one prompt
    string before sending.
    """

    def __init__(self, client: 'HeckAI'):
        # Back-reference to the owning client for session / config access.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,  # Not used by HeckAI but kept for compatibility
        stream: bool = False,
        temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
        top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
        **kwargs: Any  # Not used by HeckAI but kept for compatibility
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Returns a ``ChatCompletion`` when ``stream`` is False, otherwise a
        generator of ``ChatCompletionChunk`` objects.
        """
        # Format the messages using the format_prompt utility
        # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
        # HeckAI works better with a properly formatted conversation
        question = format_prompt(messages, add_special_tokens=True)

        # Prepare the payload for HeckAI API
        model = self._client.convert_model_name(model)
        payload = {
            "model": model,
            "question": question,
            "language": self._client.language,
            "sessionId": self._client.session_id,
            "previousQuestion": None,
            "previousAnswer": None,
            "imgUrls": [],
            "superSmartMode": False
        }

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        else:
            return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield the answer text as ``ChatCompletionChunk`` objects.

        HeckAI sends an SSE-like stream in which the answer is delimited by
        the control tokens ``[ANSWER_START]`` / ``[ANSWER_DONE]``; other
        bracketed tokens are skipped.
        """
        try:
            response = self._client.session.post(
                self._client.url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            streaming_text = []
            in_answer = False

            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue
                if line.startswith("data: "):
                    data = line[6:]
                else:
                    # Not an SSE data line — ignore.
                    continue
                if data == "[ANSWER_START]":
                    in_answer = True
                    continue
                if data == "[ANSWER_DONE]":
                    in_answer = False
                    continue
                # NOTE(review): this also drops a *content* line that happens to
                # be fully bracketed while in_answer — presumably only control
                # tokens look like this; confirm against the HeckAI stream format.
                if data.startswith("[") and data.endswith("]"):
                    continue
                if in_answer:
                    # Fix encoding issues (e.g., emoji) for each chunk
                    try:
                        data_fixed = data.encode('latin1').decode('utf-8')
                    except (UnicodeEncodeError, UnicodeDecodeError):
                        data_fixed = data
                    streaming_text.append(data_fixed)
                    delta = ChoiceDelta(content=data_fixed)
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model,
                    )
                    yield chunk
            # Final chunk with finish_reason
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
            )
            yield chunk
        except requests.exceptions.RequestException as e:
            print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
            raise IOError(f"HeckAI request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Consume the stream and assemble one ``ChatCompletion``.

        HeckAI has no true non-streaming endpoint, so the SSE stream is read
        to completion here and the answer lines are joined into one string.
        """
        try:
            answer_lines = []
            in_answer = False
            response = self._client.session.post(
                self._client.url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()
            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue
                if line.startswith("data: "):
                    data = line[6:]
                else:
                    continue
                if data == "[ANSWER_START]":
                    in_answer = True
                    continue
                if data == "[ANSWER_DONE]":
                    in_answer = False
                    continue
                # Skip other bracketed control tokens (sources, related questions, ...).
                if data.startswith("[") and data.endswith("]"):
                    continue
                if in_answer:
                    answer_lines.append(data)
            # NOTE(review): joining with a single space collapses original line
            # breaks inside the answer — presumably acceptable; confirm.
            full_text = " ".join(x.strip() for x in answer_lines if x.strip())
            # Fix encoding issues (e.g., emoji)
            try:
                full_text = full_text.encode('latin1').decode('utf-8')
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass
            # Token counts are estimated locally; HeckAI does not report usage.
            prompt_tokens = count_tokens(payload.get("question", ""))
            completion_tokens = count_tokens(full_text)
            total_tokens = prompt_tokens + completion_tokens
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )
            message = ChatCompletionMessage(
                role="assistant",
                content=full_text)
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )
            return completion
        except Exception as e:
            print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
            raise IOError(f"HeckAI request failed: {e}") from e
+
195
class Chat(BaseChat):
    """Namespace exposing ``client.chat.completions`` (OpenAI SDK layout)."""

    def __init__(self, client: 'HeckAI'):
        # The completions object holds the only real logic.
        self.completions = Completions(client)
199
class HeckAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for HeckAI API.

    Usage:
        client = HeckAI()
        response = client.chat.completions.create(
            model="google/gemini-2.0-flash-001",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "google/gemini-2.0-flash-001",
        "deepseek/deepseek-chat",
        "deepseek/deepseek-r1",
        "openai/gpt-4o-mini",
        "openai/gpt-4.1-mini",
        "x-ai/grok-3-mini-beta",
        "meta-llama/llama-4-scout",
    ]

    def __init__(self, timeout: int = 30, language: str = "English"):
        """
        Initialize the HeckAI client.

        Args:
            timeout: Request timeout in seconds.
            language: Language for responses.
        """
        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
        self.timeout = timeout
        self.language = language
        # Each client instance gets its own server-side conversation id.
        self.session_id = str(uuid.uuid4())

        # LitAgent supplies a randomized, realistic User-Agent string.
        ua_provider = LitAgent()
        self.headers = {
            'User-Agent': ua_provider.random(),
            'Content-Type': 'application/json',
            'Origin': 'https://heck.ai',
            'Referer': 'https://heck.ai/',
            'Connection': 'keep-alive'
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)

        # Expose the OpenAI-style ``chat.completions`` namespace.
        self.chat = Chat(self)

    def convert_model_name(self, model: str) -> str:
        """
        Ensure the model name is in the correct format.

        Exact matches pass through unchanged; otherwise the first available
        model whose id contains the given name (case-insensitively) is used,
        and failing that the default Gemini model is returned with a warning.
        """
        if model in self.AVAILABLE_MODELS:
            return model

        # Case-insensitive substring search over the known model ids.
        needle = model.lower()
        candidate = next(
            (m for m in self.AVAILABLE_MODELS if needle in m.lower()),
            None,
        )
        if candidate is not None:
            return candidate

        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
        return "google/gemini-2.0-flash-001"

    @property
    def models(self):
        """Return an object with a ``.list()`` method, mirroring the OpenAI SDK."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
278
+
279
# Simple test if run directly
if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in HeckAI.AVAILABLE_MODELS:
        try:
            client = HeckAI(timeout=60)
            # Exercise format_prompt by sending a short system+user conversation.
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": "Say 'Hello' in one word"},
                ],
                stream=False
            )

            content = None
            if response and response.choices:
                content = response.choices[0].message.content

            if content:
                status = "✓"
                display_text = content.strip()
                # Truncate long answers so the table stays aligned.
                if len(display_text) > 50:
                    display_text = display_text[:50] + "..."
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")