webscout-8.2.8-py3-none-any.whl → webscout-8.2.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/heckai.py
@@ -1,307 +1,308 @@
- import time
- import uuid
- import requests
- from typing import List, Dict, Optional, Union, Generator, Any
-
- from webscout.litagent import LitAgent
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
- from .utils import (
-     ChatCompletion,
-     ChatCompletionChunk,
-     Choice,
-     ChatCompletionMessage,
-     ChoiceDelta,
-     CompletionUsage,
-     format_prompt
- )
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'HeckAI'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,  # Not used by HeckAI but kept for compatibility
-         stream: bool = False,
-         temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
-         top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
-         **kwargs: Any  # Not used by HeckAI but kept for compatibility
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Format the messages using the format_prompt utility
-         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
-         # HeckAI works better with a properly formatted conversation
-         question = format_prompt(messages, add_special_tokens=True)
-
-         # Prepare the payload for HeckAI API
-         model = self._client.convert_model_name(model)
-         payload = {
-             "model": model,
-             "question": question,
-             "language": self._client.language,
-             "sessionId": self._client.session_id,
-             "previousQuestion": None,
-             "previousAnswer": None,
-             "imgUrls": [],
-             "superSmartMode": False
-         }
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.url,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             streaming_text = []
-             in_answer = False
-
-             for line in response.iter_lines(decode_unicode=True):
-                 if not line:
-                     continue
-                 if line.startswith("data: "):
-                     data = line[6:]
-                 else:
-                     continue
-                 if data == "[ANSWER_START]":
-                     in_answer = True
-                     continue
-                 if data == "[ANSWER_DONE]":
-                     in_answer = False
-                     continue
-                 if data.startswith("[") and data.endswith("]"):
-                     continue
-                 if in_answer:
-                     # Fix encoding issues (e.g., emoji) for each chunk
-                     try:
-                         data_fixed = data.encode('latin1').decode('utf-8')
-                     except (UnicodeEncodeError, UnicodeDecodeError):
-                         data_fixed = data
-                     streaming_text.append(data_fixed)
-                     delta = ChoiceDelta(content=data_fixed)
-                     choice = Choice(index=0, delta=delta, finish_reason=None)
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model,
-                     )
-                     yield chunk
-             # Final chunk with finish_reason
-             delta = ChoiceDelta(content=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-             )
-             yield chunk
-         except requests.exceptions.RequestException as e:
-             print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
-             raise IOError(f"HeckAI request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             answer_lines = []
-             in_answer = False
-             response = self._client.session.post(
-                 self._client.url,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-             for line in response.iter_lines(decode_unicode=True):
-                 if not line:
-                     continue
-                 if line.startswith("data: "):
-                     data = line[6:]
-                 else:
-                     continue
-                 if data == "[ANSWER_START]":
-                     in_answer = True
-                     continue
-                 if data == "[ANSWER_DONE]":
-                     in_answer = False
-                     continue
-                 if data.startswith("[") and data.endswith("]"):
-                     continue
-                 if in_answer:
-                     answer_lines.append(data)
-             full_text = " ".join(x.strip() for x in answer_lines if x.strip())
-             # Fix encoding issues (e.g., emoji)
-             try:
-                 full_text = full_text.encode('latin1').decode('utf-8')
-             except (UnicodeEncodeError, UnicodeDecodeError):
-                 pass
-             prompt_tokens = len(payload["question"]) // 4
-             completion_tokens = len(full_text) // 4
-             total_tokens = prompt_tokens + completion_tokens
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text)
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-             return completion
-         except Exception as e:
-             print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
-             raise IOError(f"HeckAI request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'HeckAI'):
-         self.completions = Completions(client)
-
- class HeckAI(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for HeckAI API.
-
-     Usage:
-         client = HeckAI()
-         response = client.chat.completions.create(
-             model="google/gemini-2.0-flash-001",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-
-     AVAILABLE_MODELS = [
-         "google/gemini-2.0-flash-001",
-         "deepseek/deepseek-chat",
-         "deepseek/deepseek-r1",
-         "openai/gpt-4o-mini",
-         "openai/gpt-4.1-mini",
-         "x-ai/grok-3-mini-beta",
-         "meta-llama/llama-4-scout"
-
-     ]
-
-     def __init__(
-         self,
-         timeout: int = 30,
-         language: str = "English"
-     ):
-         """
-         Initialize the HeckAI client.
-
-         Args:
-             timeout: Request timeout in seconds.
-             language: Language for responses.
-         """
-         self.timeout = timeout
-         self.language = language
-         self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
-         self.session_id = str(uuid.uuid4())
-
-         # Use LitAgent for user-agent
-         agent = LitAgent()
-         self.headers = {
-             'User-Agent': agent.random(),
-             'Content-Type': 'application/json',
-             'Origin': 'https://heck.ai',
-             'Referer': 'https://heck.ai/',
-             'Connection': 'keep-alive'
-         }
-
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Ensure the model name is in the correct format.
-         """
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Try to find a matching model
-         for available_model in self.AVAILABLE_MODELS:
-             if model.lower() in available_model.lower():
-                 return available_model
-
-         # Default to gemini if no match
-         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
-         return "google/gemini-2.0-flash-001"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
- # Simple test if run directly
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in HeckAI.AVAILABLE_MODELS:
-         try:
-             client = HeckAI(timeout=60)
-             # Test with a simple conversation to demonstrate format_prompt usage
-             response = client.chat.completions.create(
-                 model=model,
-                 messages=[
-                     {"role": "system", "content": "You are a helpful assistant."},
-                     {"role": "user", "content": "Say 'Hello' in one word"},
-                 ],
-                 stream=False
-             )
-
-             if response and response.choices and response.choices[0].message.content:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response.choices[0].message.content.strip()
-                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
+ import time
+ import uuid
+ import requests
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt,
+     count_tokens
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'HeckAI'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,  # Not used by HeckAI but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
+         top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
+         **kwargs: Any  # Not used by HeckAI but kept for compatibility
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         # HeckAI works better with a properly formatted conversation
+         question = format_prompt(messages, add_special_tokens=True)
+
+         # Prepare the payload for HeckAI API
+         model = self._client.convert_model_name(model)
+         payload = {
+             "model": model,
+             "question": question,
+             "language": self._client.language,
+             "sessionId": self._client.session_id,
+             "previousQuestion": None,
+             "previousAnswer": None,
+             "imgUrls": [],
+             "superSmartMode": False
+         }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.url,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             streaming_text = []
+             in_answer = False
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+                 if line.startswith("data: "):
+                     data = line[6:]
+                 else:
+                     continue
+                 if data == "[ANSWER_START]":
+                     in_answer = True
+                     continue
+                 if data == "[ANSWER_DONE]":
+                     in_answer = False
+                     continue
+                 if data.startswith("[") and data.endswith("]"):
+                     continue
+                 if in_answer:
+                     # Fix encoding issues (e.g., emoji) for each chunk
+                     try:
+                         data_fixed = data.encode('latin1').decode('utf-8')
+                     except (UnicodeEncodeError, UnicodeDecodeError):
+                         data_fixed = data
+                     streaming_text.append(data_fixed)
+                     delta = ChoiceDelta(content=data_fixed)
+                     choice = Choice(index=0, delta=delta, finish_reason=None)
+                     chunk = ChatCompletionChunk(
+                         id=request_id,
+                         choices=[choice],
+                         created=created_time,
+                         model=model,
+                     )
+                     yield chunk
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+             yield chunk
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during HeckAI stream request: {e}{RESET}")
+             raise IOError(f"HeckAI request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             answer_lines = []
+             in_answer = False
+             response = self._client.session.post(
+                 self._client.url,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+                 if line.startswith("data: "):
+                     data = line[6:]
+                 else:
+                     continue
+                 if data == "[ANSWER_START]":
+                     in_answer = True
+                     continue
+                 if data == "[ANSWER_DONE]":
+                     in_answer = False
+                     continue
+                 if data.startswith("[") and data.endswith("]"):
+                     continue
+                 if in_answer:
+                     answer_lines.append(data)
+             full_text = " ".join(x.strip() for x in answer_lines if x.strip())
+             # Fix encoding issues (e.g., emoji)
+             try:
+                 full_text = full_text.encode('latin1').decode('utf-8')
+             except (UnicodeEncodeError, UnicodeDecodeError):
+                 pass
+             prompt_tokens = count_tokens(payload.get("question", ""))
+             completion_tokens = count_tokens(full_text)
+             total_tokens = prompt_tokens + completion_tokens
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_text)
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+             return completion
+         except Exception as e:
+             print(f"{RED}Error during HeckAI non-stream request: {e}{RESET}")
+             raise IOError(f"HeckAI request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'HeckAI'):
+         self.completions = Completions(client)
+
+ class HeckAI(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for HeckAI API.
+
+     Usage:
+         client = HeckAI()
+         response = client.chat.completions.create(
+             model="google/gemini-2.0-flash-001",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "google/gemini-2.0-flash-001",
+         "deepseek/deepseek-chat",
+         "deepseek/deepseek-r1",
+         "openai/gpt-4o-mini",
+         "openai/gpt-4.1-mini",
+         "x-ai/grok-3-mini-beta",
+         "meta-llama/llama-4-scout"
+
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         language: str = "English"
+     ):
+         """
+         Initialize the HeckAI client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             language: Language for responses.
+         """
+         self.timeout = timeout
+         self.language = language
+         self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
+         self.session_id = str(uuid.uuid4())
+
+         # Use LitAgent for user-agent
+         agent = LitAgent()
+         self.headers = {
+             'User-Agent': agent.random(),
+             'Content-Type': 'application/json',
+             'Origin': 'https://heck.ai',
+             'Referer': 'https://heck.ai/',
+             'Connection': 'keep-alive'
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is in the correct format.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to gemini if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'google/gemini-2.0-flash-001'{RESET}")
+         return "google/gemini-2.0-flash-001"
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in HeckAI.AVAILABLE_MODELS:
+         try:
+             client = HeckAI(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")
@@ -167,9 +167,15 @@ class Completions(BaseCompletions):
  # system_fingerprint=..., # Can be added if available in final event
  )
  # Add usage to the final chunk dictionary representation if available
- final_chunk_dict = final_chunk.to_dict()
+ if hasattr(final_chunk, "model_dump"):
+     final_chunk_dict = final_chunk.model_dump(exclude_none=True)
+ else:
+     final_chunk_dict = final_chunk.dict(exclude_none=True)
  if usage_obj:
-     final_chunk_dict["usage"] = usage_obj.to_dict()
+     if hasattr(usage_obj, "model_dump"):
+         final_chunk_dict["usage"] = usage_obj.model_dump(exclude_none=True)
+     else:
+         final_chunk_dict["usage"] = usage_obj.dict(exclude_none=True)

  # Yield the final dictionary or object as needed by downstream consumers
  # Yielding the object aligns better with the generator type hint
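
This hunk (note also the new pydantic_imports.py in the file list) replaces a custom to_dict() call with a hasattr check so serialization works under both pydantic major versions: model_dump() is the pydantic v2 API, while dict() is its v1 equivalent. A self-contained illustration of the same pattern, using a hypothetical Usage model rather than webscout's actual classes:

from pydantic import BaseModel  # works on pydantic v1 and v2

class Usage(BaseModel):  # hypothetical stand-in for CompletionUsage
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0

usage = Usage(prompt_tokens=3, completion_tokens=5, total_tokens=8)
# Prefer the pydantic v2 serializer, fall back to v1, as in the hunk above.
if hasattr(usage, "model_dump"):
    payload = usage.model_dump(exclude_none=True)
else:
    payload = usage.dict(exclude_none=True)
print(payload)  # {'prompt_tokens': 3, 'completion_tokens': 5, 'total_tokens': 8}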