webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/heckai.py
@@ -1,307 +1,311 @@
-import time
-import uuid
-import requests
-from typing import List, Dict, Optional, Union, Generator, Any
-
-from webscout.litagent import LitAgent
-from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
-from .utils import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    Choice,
-    ChatCompletionMessage,
-    ChoiceDelta,
-    CompletionUsage,
-    format_prompt
-)
-
-# ANSI escape codes for formatting
-BOLD = "\033[1m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'HeckAI'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None,  # Not used by HeckAI but kept for compatibility
-        stream: bool = False,
-        temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
-        top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
-        **kwargs: Any  # Not used by HeckAI but kept for compatibility
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Format the messages using the format_prompt utility
-        # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
-        # HeckAI works better with a properly formatted conversation
-        question = format_prompt(messages, add_special_tokens=True)
-
-        # Prepare the payload for HeckAI API
-        model = self._client.convert_model_name(model)
-        payload = {
-            "model": model,
-            "question": question,
-            "language": self._client.language,
-            "sessionId": self._client.session_id,
-            "previousQuestion": None,
-            "previousAnswer": None,
-            "imgUrls": [],
-            "superSmartMode": False
-        }
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.url,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-            response.raise_for_status()
-
-            streaming_text = []
-            in_answer = False
-
-            for line in response.iter_lines(decode_unicode=True):
-                if not line:
-                    continue
-                if line.startswith("data: "):
-                    data = line[6:]
-                else:
-                    continue
-                if data == "[ANSWER_START]":
-                    in_answer = True
-                    continue
-                if data == "[ANSWER_DONE]":
-                    in_answer = False
-                    continue
-                if data.startswith("[") and data.endswith("]"):
-                    continue
-                if in_answer:
-                    # Fix encoding issues (e.g., emoji) for each chunk
-                    try:
-                        data_fixed = data.encode('latin1').decode('utf-8')
-                    except (UnicodeEncodeError, UnicodeDecodeError):
-                        data_fixed = data
-                    streaming_text.append(data_fixed)
-                    delta = ChoiceDelta(content=data_fixed)
-                    choice = Choice(index=0, delta=delta, finish_reason=None)
-                    chunk = ChatCompletionChunk(
-                        id=request_id,
-                        choices=[choice],
-                        created=created_time,
-                        model=model,
-                    )
-                    yield chunk
-            # Final chunk with finish_reason
-            delta = ChoiceDelta(content=None)
-            choice = Choice(index=0, delta=delta, finish_reason="stop")
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-            )
-            yield chunk
-        except requests.exceptions.RequestException as e:
-            print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
-            raise IOError(f"HeckAI request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            answer_lines = []
-            in_answer = False
-            response = self._client.session.post(
-                self._client.url,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-            response.raise_for_status()
-            for line in response.iter_lines(decode_unicode=True):
-                if not line:
-                    continue
-                if line.startswith("data: "):
-                    data = line[6:]
-                else:
-                    continue
-                if data == "[ANSWER_START]":
-                    in_answer = True
-                    continue
-                if data == "[ANSWER_DONE]":
-                    in_answer = False
-                    continue
-                if data.startswith("[") and data.endswith("]"):
-                    continue
-                if in_answer:
-                    answer_lines.append(data)
-            full_text = " ".join(x.strip() for x in answer_lines if x.strip())
-            # Fix encoding issues (e.g., emoji)
-            try:
-                full_text = full_text.encode('latin1').decode('utf-8')
-            except (UnicodeEncodeError, UnicodeDecodeError):
-                pass
-            prompt_tokens = len(payload["question"]) // 4
-            completion_tokens = len(full_text) // 4
-            total_tokens = prompt_tokens + completion_tokens
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text)
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-            return completion
-        except Exception as e:
-            print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
-            raise IOError(f"HeckAI request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'HeckAI'):
-        self.completions = Completions(client)
-
-class HeckAI(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for HeckAI API.
-
-    Usage:
-        client = HeckAI()
-        response = client.chat.completions.create(
-            model="google/gemini-2.0-flash-001",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-        print(response.choices[0].message.content)
-    """
-
-    AVAILABLE_MODELS = [
-        "google/gemini-2.0-flash-001",
-        "deepseek/deepseek-chat",
-        "deepseek/deepseek-r1",
-        "openai/gpt-4o-mini",
-        "openai/gpt-4.1-mini",
-        "x-ai/grok-3-mini-beta",
-        "meta-llama/llama-4-scout"
-
-    ]
-
-    def __init__(
-        self,
-        timeout: int = 30,
-        language: str = "English"
-    ):
-        """
-        Initialize the HeckAI client.
-
-        Args:
-            timeout: Request timeout in seconds.
-            language: Language for responses.
-        """
-        self.timeout = timeout
-        self.language = language
-        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
-        self.session_id = str(uuid.uuid4())
-
-        # Use LitAgent for user-agent
-        agent = LitAgent()
-        self.headers = {
-            'User-Agent': agent.random(),
-            'Content-Type': 'application/json',
-            'Origin': 'https://heck.ai',
-            'Referer': 'https://heck.ai/',
-            'Connection': 'keep-alive'
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Ensure the model name is in the correct format.
-        """
-        if model in self.AVAILABLE_MODELS:
-            return model
-
-        # Try to find a matching model
-        for available_model in self.AVAILABLE_MODELS:
-            if model.lower() in available_model.lower():
-                return available_model
-
-        # Default to gemini if no match
-        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
-        return "google/gemini-2.0-flash-001"
-
-    @property
-    def models(self):
-        class _ModelList:
-            def list(inner_self):
-                return type(self).AVAILABLE_MODELS
-        return _ModelList()
-
-# Simple test if run directly
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in HeckAI.AVAILABLE_MODELS:
-        try:
-            client = HeckAI(timeout=60)
-            # Test with a simple conversation to demonstrate format_prompt usage
-            response = client.chat.completions.create(
-                model=model,
-                messages=[
-                    {"role": "system", "content": "You are a helpful assistant."},
-                    {"role": "user", "content": "Say 'Hello' in one word"},
-                ],
-                stream=False
-            )
-
-            if response and response.choices and response.choices[0].message.content:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response.choices[0].message.content.strip()
-                display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+import time
+import uuid
+import requests
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.litagent import LitAgent
+from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from .utils import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    Choice,
+    ChatCompletionMessage,
+    ChoiceDelta,
+    CompletionUsage,
+    format_prompt,
+    count_tokens
+)
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'HeckAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,  # Not used by HeckAI but kept for compatibility
+        stream: bool = False,
+        temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
+        top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any  # Not used by HeckAI but kept for compatibility
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Format the messages using the format_prompt utility
+        # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+        # HeckAI works better with a properly formatted conversation
+        question = format_prompt(messages, add_special_tokens=True)
+
+        # Prepare the payload for HeckAI API
+        model = self._client.convert_model_name(model)
+        payload = {
+            "model": model,
+            "question": question,
+            "language": self._client.language,
+            "sessionId": self._client.session_id,
+            "previousQuestion": None,
+            "previousAnswer": None,
+            "imgUrls": [],
+            "superSmartMode": False
+        }
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            response.raise_for_status()
+
+            streaming_text = []
+            in_answer = False
+
+            for line in response.iter_lines(decode_unicode=True):
+                if not line:
+                    continue
+                if line.startswith("data: "):
+                    data = line[6:]
+                else:
+                    continue
+                if data == "[ANSWER_START]":
+                    in_answer = True
+                    continue
+                if data == "[ANSWER_DONE]":
+                    in_answer = False
+                    continue
+                if data.startswith("[") and data.endswith("]"):
+                    continue
+                if in_answer:
+                    # Fix encoding issues (e.g., emoji) for each chunk
+                    try:
+                        data_fixed = data.encode('latin1').decode('utf-8')
+                    except (UnicodeEncodeError, UnicodeDecodeError):
+                        data_fixed = data
+                    streaming_text.append(data_fixed)
+                    delta = ChoiceDelta(content=data_fixed)
+                    choice = Choice(index=0, delta=delta, finish_reason=None)
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                    )
+                    yield chunk
+            # Final chunk with finish_reason
+            delta = ChoiceDelta(content=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+            )
+            yield chunk
+        except requests.exceptions.RequestException as e:
+            print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
+            raise IOError(f"HeckAI request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            answer_lines = []
+            in_answer = False
+            response = self._client.session.post(
+                self._client.url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            response.raise_for_status()
+            for line in response.iter_lines(decode_unicode=True):
+                if not line:
+                    continue
+                if line.startswith("data: "):
+                    data = line[6:]
+                else:
+                    continue
+                if data == "[ANSWER_START]":
+                    in_answer = True
+                    continue
+                if data == "[ANSWER_DONE]":
+                    in_answer = False
+                    continue
+                if data.startswith("[") and data.endswith("]"):
+                    continue
+                if in_answer:
+                    answer_lines.append(data)
+            full_text = " ".join(x.strip() for x in answer_lines if x.strip())
+            # Fix encoding issues (e.g., emoji)
+            try:
+                full_text = full_text.encode('latin1').decode('utf-8')
+            except (UnicodeEncodeError, UnicodeDecodeError):
+                pass
+            prompt_tokens = count_tokens(payload.get("question", ""))
+            completion_tokens = count_tokens(full_text)
+            total_tokens = prompt_tokens + completion_tokens
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text)
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
+            raise IOError(f"HeckAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'HeckAI'):
+        self.completions = Completions(client)
+
+class HeckAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for HeckAI API.
+
+    Usage:
+        client = HeckAI()
+        response = client.chat.completions.create(
+            model="google/gemini-2.0-flash-001",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        "google/gemini-2.5-flash-preview",
+        "deepseek/deepseek-chat",
+        "deepseek/deepseek-r1",
+        "openai/gpt-4o-mini",
+        "openai/gpt-4.1-mini",
+        "x-ai/grok-3-mini-beta",
+        "meta-llama/llama-4-scout"
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        language: str = "English"
+    ):
+        """
+        Initialize the HeckAI client.
+
+        Args:
+            timeout: Request timeout in seconds.
+            language: Language for responses.
+        """
+        self.timeout = timeout
+        self.language = language
+        self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
+        self.session_id = str(uuid.uuid4())
+
+        # Use LitAgent for user-agent
+        agent = LitAgent()
+        self.headers = {
+            'User-Agent': agent.random(),
+            'Content-Type': 'application/json',
+            'Origin': 'https://heck.ai',
+            'Referer': 'https://heck.ai/',
+            'Connection': 'keep-alive'
+        }
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Ensure the model name is in the correct format.
+        """
+        if model in self.AVAILABLE_MODELS:
+            return model
+
+        # Try to find a matching model
+        for available_model in self.AVAILABLE_MODELS:
+            if model.lower() in available_model.lower():
+                return available_model
+
+        # Default to gemini if no match
+        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
+        return "google/gemini-2.0-flash-001"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+# Simple test if run directly
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in HeckAI.AVAILABLE_MODELS:
+        try:
+            client = HeckAI(timeout=60)
+            # Test with a simple conversation to demonstrate format_prompt usage
+            response = client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": "You are a helpful assistant."},
+                    {"role": "user", "content": "Say 'Hello' in one word"},
+                ],
+                stream=False
+            )
+
+            if response and response.choices and response.choices[0].message.content:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response.choices[0].message.content.strip()
+                display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
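The visible changes to this file in 8.3 are per-request timeout and proxies parameters on create() (previously only the client-wide timeout applied), usage accounting via count_tokens instead of the old len(text) // 4 estimate, and a default model list led by google/gemini-2.5-flash-preview. A minimal usage sketch against the new signature follows; the import path and the proxy address are assumptions for illustration, not something this diff confirms.

# Sketch only: exercises the 8.3 create() signature shown in the diff above.
# The import path and proxy address are illustrative assumptions.
from webscout.Provider.OPENAI.heckai import HeckAI

client = HeckAI(timeout=30, language="English")

# Non-stream call; usage is now computed with count_tokens rather than len // 4.
response = client.chat.completions.create(
    model="google/gemini-2.5-flash-preview",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
print(response.usage.total_tokens)

# Streaming call with the new per-request overrides: a 60-second timeout and a
# proxy applied to this request only (both fall back to client defaults if omitted).
for chunk in client.chat.completions.create(
    model="google/gemini-2.5-flash-preview",
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
    timeout=60,
    proxies={"https": "http://127.0.0.1:8080"},  # hypothetical local proxy
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)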