webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/HeckAI.py

@@ -13,11 +13,31 @@ from webscout.litagent import LitAgent
 
 class HeckAI(Provider):
     """
-    A class to interact with the HeckAI API with LitAgent user-agent.
+    Provides an interface to interact with the HeckAI API using a LitAgent user-agent.
+
+    This class supports conversational AI interactions with multiple available models,
+    manages session state, handles streaming and non-streaming responses, and integrates
+    with conversation history and prompt optimizers.
+
+    Attributes:
+        AVAILABLE_MODELS (list): List of supported model identifiers.
+        url (str): API endpoint URL.
+        session_id (str): Unique session identifier for the conversation.
+        language (str): Language for the conversation.
+        headers (dict): HTTP headers used for API requests.
+        session (Session): curl_cffi session for HTTP requests.
+        is_conversation (bool): Whether to maintain conversation history.
+        max_tokens_to_sample (int): Maximum tokens to sample (not used by API).
+        timeout (int): Request timeout in seconds.
+        last_response (dict): Stores the last API response.
+        model (str): Model identifier in use.
+        previous_question (str): Last question sent to the API.
+        previous_answer (str): Last answer received from the API.
+        conversation (Conversation): Conversation history manager.
     """
 
     AVAILABLE_MODELS = [
-        "google/gemini-2.0-flash-001",
+        "google/gemini-2.5-flash-preview",
         "deepseek/deepseek-chat",
         "deepseek/deepseek-r1",
         "openai/gpt-4o-mini",
@@ -29,7 +49,7 @@ class HeckAI(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2049, # Note: max_tokens is not used by this API
+        max_tokens: int = 2049,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -40,7 +60,25 @@ class HeckAI(Provider):
         model: str = "google/gemini-2.0-flash-001",
         language: str = "English"
     ):
-        """Initializes the HeckAI API client."""
+        """
+        Initializes the HeckAI API client.
+
+        Args:
+            is_conversation (bool): Whether to maintain conversation history.
+            max_tokens (int): Maximum tokens to sample (not used by this API).
+            timeout (int): Timeout for API requests in seconds.
+            intro (str, optional): Introductory prompt for the conversation.
+            filepath (str, optional): File path for storing conversation history.
+            update_file (bool): Whether to update the conversation file.
+            proxies (dict): Proxy settings for HTTP requests.
+            history_offset (int): Offset for conversation history truncation.
+            act (str, optional): Role or act for the conversation.
+            model (str): Model identifier to use.
+            language (str): Language for the conversation.
+
+        Raises:
+            ValueError: If the provided model is not in AVAILABLE_MODELS.
+        """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
@@ -53,6 +91,7 @@ class HeckAI(Provider):
             'Content-Type': 'application/json',
             'Origin': 'https://heck.ai', # Keep Origin
             'Referer': 'https://heck.ai/', # Keep Referer
+            'User-Agent': LitAgent().random(), # Use random user agent
         }
 
         # Initialize curl_cffi Session
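Taken together, the constructor signature and docstring added above describe how the provider is meant to be driven. A minimal usage sketch, assuming HeckAI is importable from webscout.Provider as the package layout in this diff suggests:

from webscout.Provider import HeckAI  # import path assumed from the file layout above

ai = HeckAI(
    model="google/gemini-2.5-flash-preview",  # one of AVAILABLE_MODELS in 8.3
    timeout=30,
    language="English",
)

# Non-streaming call: chat() returns the full reply as a string.
print(ai.chat("What is the capital of France?"))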
@@ -90,11 +129,29 @@ class HeckAI(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False, # API supports streaming
+        stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the HeckAI API and returns the response.
+
+        Args:
+            prompt (str): The prompt or question to send to the API.
+            stream (bool): If True, yields streaming responses as they arrive.
+            raw (bool): If True, yields raw string chunks instead of dicts.
+            optimizer (str, optional): Name of the optimizer to apply to the prompt.
+            conversationally (bool): If True, optimizer is applied to the full conversation prompt.
+
+        Returns:
+            Union[Dict[str, Any], Generator]: If stream is False, returns a dict with the response text.
+                If stream is True, yields response chunks as dicts or strings.
+
+        Raises:
+            Exception: If the optimizer is not available.
+            exceptions.FailedToGenerateResponseError: On API or network errors.
+        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
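For streaming, the docstrings added above state that chat() yields plain text chunks while ask() yields dicts (or raw strings when raw=True). A sketch of both patterns, reusing the instance from the previous example and assuming each streamed dict carries a "text" field that get_message() can extract:

# Streaming with chat(): yields text chunks as they arrive.
for chunk in ai.chat("Write one sentence about Paris.", stream=True):
    print(chunk, end="", flush=True)
print()

# Streaming with ask(): yields dicts; get_message() is assumed to pull out the "text" field.
for piece in ai.ask("Write one sentence about Rome.", stream=True):
    print(ai.get_message(piece), end="", flush=True)
print()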
@@ -132,16 +189,16 @@ class HeckAI(Provider):
             response.raise_for_status() # Check for HTTP errors
 
             # Use sanitize_stream to process the stream
-            processed_stream = sanitize_stream(
-                data=response.iter_content(chunk_size=1024), # Pass byte iterator
-                intro_value="data: ", # Prefix to remove (note the space)
-                to_json=False, # Content is text
-                start_marker="data: [ANSWER_START]",
-                end_marker="data: [ANSWER_DONE]",
-                skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
-                yield_raw_on_error=True,
-                strip_chars=" \n\r\t" # Strip whitespace characters from chunks
-            )
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=1024), # Pass byte iterator
+                intro_value="data: ", # Prefix to remove (note the space)
+                to_json=False, # Content is text
+                start_marker="data: [ANSWER_START]",
+                end_marker="data: [ANSWER_DONE]",
+                skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+                yield_raw_on_error=True,
+                strip_chars=" \n\r\t" # Strip whitespace characters from chunks
+            )
 
             for content_chunk in processed_stream:
                 # content_chunk is the text between ANSWER_START and ANSWER_DONE
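The sanitize_stream() call above is driven entirely by markers embedded in the SSE feed. The following standalone sketch (an illustration of the idea, not webscout's own sanitize_stream implementation) shows what those parameters amount to: strip the "data: " prefix, keep only chunks between the answer markers, and ignore the related-question and reasoning markers.

def filter_answer_lines(lines):
    # Yield only the text between [ANSWER_START] and [ANSWER_DONE].
    inside = False
    skip = {"[RELATE_Q_START]", "[RELATE_Q_DONE]", "[REASON_START]", "[REASON_DONE]"}
    for raw in lines:
        line = raw.removeprefix("data: ").strip(" \n\r\t")
        if line == "[ANSWER_START]":
            inside = True
        elif line == "[ANSWER_DONE]":
            inside = False
        elif inside and line not in skip:
            yield line

demo = [
    "data: [RELATE_Q_START]", "data: related?", "data: [RELATE_Q_DONE]",
    "data: [ANSWER_START]", "data: Hello", "data: world", "data: [ANSWER_DONE]",
]
print(list(filter_answer_lines(demo)))  # ['Hello', 'world']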
@@ -151,19 +208,19 @@ class HeckAI(Provider):
 
             # Only update history if we received a valid response
             if streaming_text:
-                # Update history and previous answer after stream finishes
-                self.previous_answer = streaming_text
-                # Convert to simple text before updating conversation
-                try:
-                    # Ensure content is valid before updating conversation
-                    if streaming_text and isinstance(streaming_text, str):
-                        # Sanitize the content to ensure it's valid
-                        sanitized_text = streaming_text.strip()
-                        if sanitized_text: # Only update if we have non-empty content
-                            self.conversation.update_chat_history(prompt, sanitized_text)
-                except Exception as e:
-                    # If conversation update fails, log but don't crash
-                    print(f"Warning: Failed to update conversation history: {str(e)}")
+                # Update history and previous answer after stream finishes
+                self.previous_answer = streaming_text
+                # Convert to simple text before updating conversation
+                try:
+                    # Ensure content is valid before updating conversation
+                    if streaming_text and isinstance(streaming_text, str):
+                        # Sanitize the content to ensure it's valid
+                        sanitized_text = streaming_text.strip()
+                        if sanitized_text: # Only update if we have non-empty content
+                            self.conversation.update_chat_history(prompt, sanitized_text)
+                except Exception as e:
+                    # If conversation update fails, log but don't crash
+                    print(f"Warning: Failed to update conversation history: {str(e)}")
 
         except CurlError as e: # Catch CurlError
             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
@@ -197,6 +254,15 @@ class HeckAI(Provider):
 
     @staticmethod
     def fix_encoding(text):
+        """
+        Fixes encoding issues in the response text.
+
+        Args:
+            text (Union[str, dict]): The text or response dict to fix encoding for.
+
+        Returns:
+            Union[str, dict]: The text or dict with encoding corrected if possible.
+        """
         if isinstance(text, dict) and "text" in text:
             try:
                 text["text"] = text["text"].encode("latin1").decode("utf-8")
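The body of fix_encoding() re-encodes the text as Latin-1 and decodes it as UTF-8, which undoes the common mojibake produced when UTF-8 bytes are mistakenly read as Latin-1. A tiny self-contained illustration of that round trip:

# Simulate the mojibake and repair it the same way fix_encoding() does.
mojibake = "café".encode("utf-8").decode("latin1")    # 'cafÃ©'
repaired = mojibake.encode("latin1").decode("utf-8")  # 'café'
print(mojibake, "->", repaired)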
@@ -210,13 +276,25 @@ class HeckAI(Provider):
                 return text
             return text
 
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Sends a prompt to the HeckAI API and returns only the message text.
+
+        Args:
+            prompt (str): The prompt or question to send to the API.
+            stream (bool): If True, yields streaming response text.
+            optimizer (str, optional): Name of the optimizer to apply to the prompt.
+            conversationally (bool): If True, optimizer is applied to the full conversation prompt.
+
+        Returns:
+            Union[str, Generator[str, None, None]]: The response text, or a generator yielding text chunks.
+        """
         def for_stream_chat():
             # ask() yields dicts or strings when streaming
             gen = self.ask(
@@ -236,50 +314,62 @@ class HeckAI(Provider):
 
         return for_stream_chat() if stream else for_non_stream_chat()
 
-    def get_message(self, response: dict) -> str:
-        # Validate response format
-        if not isinstance(response, dict):
-            raise TypeError(f"Expected dict response, got {type(response).__name__}")
-
-        # Handle missing text key gracefully
-        if "text" not in response:
-            return ""
-
-        # Ensure text is a string
-        text = response["text"]
-        if not isinstance(text, str):
-            return str(text)
-
-        return text
-
-if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in HeckAI.AVAILABLE_MODELS:
-        try:
-            test_ai = HeckAI(model=model, timeout=60)
-            # Use non-streaming mode first to avoid potential streaming issues
-            try:
-                response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
-                print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
-            except Exception as e1:
-                # Fall back to streaming if non-streaming fails
-                print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
-                response = test_ai.chat("Say 'Hello' in one word", stream=True)
-                response_text = ""
-                for chunk in response:
-                    if chunk and isinstance(chunk, str):
-                        response_text += chunk
-
-                if response_text and len(response_text.strip()) > 0:
-                    status = ""
-                    # Truncate response if too long
-                    display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-                    print(f"\r{model:<50} {status:<10} {display_text}")
-                else:
-                    raise ValueError("Empty or invalid response")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message text from the API response.
+
+        Args:
+            response (dict): The API response dictionary.
+
+        Returns:
+            str: The extracted message text. Returns an empty string if not found.
+
+        Raises:
+            TypeError: If the response is not a dictionary.
+        """
+        # Validate response format
+        if not isinstance(response, dict):
+            raise TypeError(f"Expected dict response, got {type(response).__name__}")
+
+        # Handle missing text key gracefully
+        if "text" not in response:
+            return ""
+
+        # Ensure text is a string
+        text = response["text"]
+        if not isinstance(text, str):
+            return str(text)
+
+        return text
+
+if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in HeckAI.AVAILABLE_MODELS:
+        try:
+            test_ai = HeckAI(model=model, timeout=60)
+            # Use non-streaming mode first to avoid potential streaming issues
+            try:
+                response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+                print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+            except Exception as e1:
+                # Fall back to streaming if non-streaming fails
+                print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+                response = test_ai.chat("Say 'Hello' in one word", stream=True)
+                response_text = ""
+                for chunk in response:
+                    if chunk and isinstance(chunk, str):
+                        response_text += chunk
+
+                if response_text and len(response_text.strip()) > 0:
+                    status = "✓"
+                    # Truncate response if too long
+                    display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+                    print(f"\r{model:<50} {status:<10} {display_text}")
+                else:
+                    raise ValueError("Empty or invalid response")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/LambdaChat.py

@@ -21,6 +21,7 @@ class LambdaChat(Provider):
 
     AVAILABLE_MODELS = [
         "deepseek-llama3.3-70b",
+        "apriel-5b-instruct",
         "deepseek-r1",
         "hermes-3-llama-3.1-405b-fp8",
         "llama3.1-nemotron-70b-instruct",
webscout/Provider/MCPCore.py

@@ -66,9 +66,13 @@ class MCPCore(Provider):
     ):
         """Initializes the MCPCore API client."""
         if model not in self.AVAILABLE_MODELS:
-            print(f"Warning: Model '{model}' not in known AVAILABLE_MODELS. Attempting to use anyway.")
+            print(f"Warning: Model '{model}' is not listed in AVAILABLE_MODELS. Proceeding with the provided model.")
 
         self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
+
+        # Cache the user-agent at the class level
+        if not hasattr(MCPCore, '_cached_user_agent'):
+            MCPCore._cached_user_agent = LitAgent().random()
         self.model = model
         self.system_prompt = system_prompt
         self.cookies_path = cookies_path
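The hasattr check above memoizes the randomly generated user agent on the class, so every MCPCore instance in the same process reuses one value instead of rolling a new one per instance. A generic sketch of the same pattern, with hypothetical names:

import random

class Client:
    _cached_user_agent = None  # shared across all instances

    def __init__(self):
        # Generate the value once, then reuse it for every later instance.
        if Client._cached_user_agent is None:
            Client._cached_user_agent = f"DemoAgent/{random.randint(1, 1000)}"
        self.user_agent = Client._cached_user_agent

a, b = Client(), Client()
assert a.user_agent == b.user_agent  # both instances present the same value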
@@ -82,7 +86,7 @@ class MCPCore(Provider):
             'authority': 'chat.mcpcore.xyz',
             'accept': '*/*',
             'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
-            'authorization': f'Bearer {self.token}' if self.token else '',
+            **({'authorization': f'Bearer {self.token}'} if self.token else {}),
             'content-type': 'application/json',
             'dnt': '1',
             'origin': 'https://chat.mcpcore.xyz',
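The new header construction uses dict unpacking so the authorization key is omitted entirely when no token is set, rather than being sent as an empty string as in 8.2.8. A small self-contained sketch of the pattern:

token = None  # no token configured in this demo
headers = {
    'accept': '*/*',
    # The key only exists when the condition holds.
    **({'authorization': f'Bearer {token}'} if token else {}),
    'content-type': 'application/json',
}
print('authorization' in headers)  # False: the key is simply absent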
@@ -95,7 +99,7 @@ class MCPCore(Provider):
             'sec-fetch-mode': 'cors',
             'sec-fetch-site': 'same-origin',
             'sec-gpc': '1',
-            'user-agent': LitAgent().random(),
+            'user-agent': self._cached_user_agent,
         }
 
         # Apply headers, proxies, and cookies to the session