webscout-7.5-py3-none-any.whl → webscout-7.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (132)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/__init__.py +9 -9
  9. webscout/Extra/autocoder/autocoder_utiles.py +193 -199
  10. webscout/Extra/autocoder/rawdog.py +789 -677
  11. webscout/Extra/gguf.py +682 -428
  12. webscout/Extra/weather.py +178 -156
  13. webscout/Extra/weather_ascii.py +70 -17
  14. webscout/Litlogger/core/logger.py +1 -2
  15. webscout/Litlogger/handlers/file.py +1 -1
  16. webscout/Litlogger/styles/formats.py +0 -2
  17. webscout/Litlogger/utils/detectors.py +0 -1
  18. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  19. webscout/Provider/AISEARCH/ISou.py +1 -22
  20. webscout/Provider/AISEARCH/felo_search.py +0 -1
  21. webscout/Provider/AllenAI.py +28 -30
  22. webscout/Provider/C4ai.py +29 -11
  23. webscout/Provider/ChatGPTClone.py +226 -0
  24. webscout/Provider/ChatGPTGratis.py +24 -56
  25. webscout/Provider/DeepSeek.py +25 -17
  26. webscout/Provider/Deepinfra.py +115 -48
  27. webscout/Provider/Gemini.py +1 -1
  28. webscout/Provider/Glider.py +33 -12
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +23 -7
  31. webscout/Provider/Hunyuan.py +272 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/LambdaChat.py +391 -0
  34. webscout/Provider/Netwrck.py +42 -19
  35. webscout/Provider/OLLAMA.py +256 -32
  36. webscout/Provider/PI.py +4 -2
  37. webscout/Provider/Perplexitylabs.py +26 -6
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +179 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  51. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  52. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  53. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  54. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  55. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  56. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  57. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  58. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  59. webscout/Provider/TTS/__init__.py +2 -2
  60. webscout/Provider/TTS/deepgram.py +12 -39
  61. webscout/Provider/TTS/elevenlabs.py +14 -40
  62. webscout/Provider/TTS/gesserit.py +11 -35
  63. webscout/Provider/TTS/murfai.py +13 -39
  64. webscout/Provider/TTS/parler.py +17 -40
  65. webscout/Provider/TTS/speechma.py +180 -0
  66. webscout/Provider/TTS/streamElements.py +17 -44
  67. webscout/Provider/TextPollinationsAI.py +39 -59
  68. webscout/Provider/Venice.py +25 -8
  69. webscout/Provider/WebSim.py +227 -0
  70. webscout/Provider/WiseCat.py +27 -5
  71. webscout/Provider/Youchat.py +64 -37
  72. webscout/Provider/__init__.py +12 -7
  73. webscout/Provider/akashgpt.py +20 -5
  74. webscout/Provider/flowith.py +33 -7
  75. webscout/Provider/freeaichat.py +32 -45
  76. webscout/Provider/koala.py +20 -5
  77. webscout/Provider/labyrinth.py +239 -0
  78. webscout/Provider/learnfastai.py +28 -15
  79. webscout/Provider/llamatutor.py +1 -1
  80. webscout/Provider/llmchat.py +30 -8
  81. webscout/Provider/multichat.py +65 -9
  82. webscout/Provider/sonus.py +208 -0
  83. webscout/Provider/talkai.py +1 -0
  84. webscout/Provider/turboseek.py +3 -0
  85. webscout/Provider/tutorai.py +2 -0
  86. webscout/Provider/typegpt.py +155 -65
  87. webscout/Provider/uncovr.py +297 -0
  88. webscout/Provider/x0gpt.py +3 -1
  89. webscout/Provider/yep.py +102 -20
  90. webscout/__init__.py +3 -0
  91. webscout/cli.py +53 -40
  92. webscout/conversation.py +1 -10
  93. webscout/litagent/__init__.py +2 -2
  94. webscout/litagent/agent.py +356 -20
  95. webscout/litagent/constants.py +34 -5
  96. webscout/litprinter/__init__.py +0 -3
  97. webscout/models.py +181 -0
  98. webscout/optimizers.py +1 -1
  99. webscout/prompt_manager.py +2 -8
  100. webscout/scout/core/scout.py +1 -4
  101. webscout/scout/core/search_result.py +1 -1
  102. webscout/scout/core/text_utils.py +1 -1
  103. webscout/scout/core.py +2 -5
  104. webscout/scout/element.py +1 -1
  105. webscout/scout/parsers/html_parser.py +1 -1
  106. webscout/scout/utils.py +0 -1
  107. webscout/swiftcli/__init__.py +1 -3
  108. webscout/tempid.py +1 -1
  109. webscout/update_checker.py +1 -3
  110. webscout/version.py +1 -1
  111. webscout/webscout_search_async.py +1 -2
  112. webscout/yep_search.py +297 -297
  113. {webscout-7.5.dist-info → webscout-7.7.dist-info}/LICENSE.md +4 -4
  114. {webscout-7.5.dist-info → webscout-7.7.dist-info}/METADATA +127 -405
  115. {webscout-7.5.dist-info → webscout-7.7.dist-info}/RECORD +118 -117
  116. webscout/Extra/autollama.py +0 -231
  117. webscout/Provider/Amigo.py +0 -274
  118. webscout/Provider/Bing.py +0 -243
  119. webscout/Provider/DiscordRocks.py +0 -253
  120. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  121. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  122. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  123. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  124. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  125. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  126. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  127. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  128. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  129. webscout/Provider/TTS/voicepod.py +0 -117
  130. {webscout-7.5.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  131. {webscout-7.5.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  132. {webscout-7.5.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Provider/labyrinth.py (new file)
@@ -0,0 +1,239 @@
+import requests
+import json
+import uuid
+from typing import Any, Dict, Optional, Generator, Union
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class LabyrinthAI(Provider):
+    """
+    A class to interact with the Labyrinth AI chat API.
+    """
+
+    # AVAILABLE_MODELS = [
+    #     "gemini-2.0-flash"
+    # ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        # model: str = "gemini-2.0-flash",
+        browser: str = "chrome"
+    ):
+        """Initializes the Labyrinth AI API client."""
+        # if model not in self.AVAILABLE_MODELS:
+        #     raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://labyrinth-ebon.vercel.app/api/chat"
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Origin": "https://labyrinth-ebon.vercel.app",
+            "Cookie": "stock-mode=false; __Host-next-auth.csrf-token=68aa6224f2ff7bbf2c4480a90c49b7b95aaac01a63ed90f3d20a69292c16a366%7C1f6672653c6e304ea971373fecdc3fe491568d014c68cdf3b26ead42f1c6ac62; __Secure-next-auth.callback-url=https%3A%2F%2Flabyrinth-ebon.vercel.app%2F; selectedModel={\"id\":\"gemini-2.0-flash\",\"name\":\"Gemini 2.0 Flash\",\"provider\":\"Google Generative AI\",\"providerId\":\"google\",\"enabled\":true,\"toolCallType\":\"native\",\"searchMode\":true}; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..Z5-1j_rsCWRHY17B.s0lMkhWr0S7a3-4h2p-ce0NJHeNyh8nDyOcsrzFU8AZtBbygGcHKbJ8PzLLQBNL7NwrUwET3fKGbtnAphaVjuSJQfXA0tu69zKJELPw-A3x0Ev6aHJMTG3l9_SweByHyfCSCnGB7tvjwEFsW4c5xs_HzMdPmoRTYyYzlZPuDGhHtQX7WyeUiARc36NfwV-KJYpzXV5-g0VkpsxFEawcfdk6D_S7JtOMmjMTTYuw2BbNYvtlvM-n_XivIctQmQ5Fp65JEE73nr5hWVReyYrkyfUGt4Q.TP8Woa-7Ao05yVCjbbGDug",
+            "Referer": "https://labyrinth-ebon.vercel.app/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1"
+        }
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        # self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Prepare the request payload
+        payload = {
+            "id": str(uuid.uuid4()),
+            "messages": [
+                {
+                    "role": "user",
+                    "content": conversation_prompt,
+                    "parts": [{"type": "text", "text": conversation_prompt}]
+                }
+            ],
+            "stockMode": False
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        # If we get a non-200 response, try refreshing our identity once
+                        if response.status_code in [403, 429]:
+                            self.refresh_identity()
+                            # Retry with new identity
+                            with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
+                                if not retry_response.ok:
+                                    raise exceptions.FailedToGenerateResponseError(
+                                        f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                                    )
+                                response = retry_response
+                        else:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Request failed with status code {response.status_code}"
+                            )
+
+                    streaming_text = ""
+                    for line in response.iter_lines():
+                        if line:
+                            try:
+                                line = line.decode('utf-8')
+                                if line.startswith('0:'):
+                                    content = line[2:].strip('"')
+                                    streaming_text += content
+                                    resp = dict(text=content)
+                                    yield resp if raw else resp
+                            except UnicodeDecodeError:
+                                continue
+
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+        def for_non_stream():
+            try:
+                response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                if response.status_code != 200:
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                        if not response.ok:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                            )
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                full_response = ""
+                for line in response.iter_lines():
+                    if line:
+                        try:
+                            line = line.decode('utf-8')
+                            if line.startswith('0:'):
+                                content = line[2:].strip('"')
+                                full_response += content
+                        except UnicodeDecodeError:
+                            continue
+
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return {"text": full_response}
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
webscout/Provider/learnfastai.py
@@ -1,6 +1,6 @@
 import os
 import json
-from typing import Optional
+from typing import Optional, Union, Generator
 import uuid
 import requests
 import cloudscraper
@@ -118,7 +118,9 @@ class LearnFast(Provider):
         """
         payload = {
             "prompt": conversation_prompt,
+            "firstQuestionFlag": True,
             "sessionId": session_id,
+            "attachments": []
         }
         if image_url:
             payload["attachments"] = [
@@ -138,7 +140,7 @@ class LearnFast(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         image_path: Optional[str] = None,
-    ) -> dict:
+    ) -> Union[dict, Generator[dict, None, None]]:
         """Chat with LearnFast

         Args:
@@ -151,7 +153,7 @@ class LearnFast(Provider):
                 Defaults to None.

         Returns:
-            dict : {}
+            Union[dict, Generator[dict, None, None]]: Response generated
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -194,20 +196,24 @@ class LearnFast(Provider):
             full_response = ""
             for line in response.iter_lines(decode_unicode=True):
                 if line:
-                    if line.strip() == "[DONE]":
+                    line = line.strip()
+                    if line == "[DONE]":
                         break
                     try:
                         json_response = json.loads(line)
-                        message = json_response.get('data', {}).get('message', '')
-                        if message:
-                            full_response += message
-                            # print(message, end='', flush=True)
+                        if json_response.get('code') == 200 and json_response.get('data'):
+                            message = json_response['data'].get('message', '')
+                            if message:
+                                full_response += message
+                                if stream:
+                                    yield {"text": message}
                     except json.JSONDecodeError:
-                        print(f"\nFailed to parse JSON: {line}")
+                        pass
             self.last_response.update({"text": full_response})
             self.conversation.update_chat_history(prompt, full_response)

-            return self.last_response
+            if not stream:
+                return self.last_response
         except requests.exceptions.RequestException as e:
             raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")

@@ -218,7 +224,7 @@ class LearnFast(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         image_path: Optional[str] = None,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
@@ -228,10 +234,17 @@ class LearnFast(Provider):
             image_path (Optional[str], optional): Path to the image to be uploaded.
                 Defaults to None.
         Returns:
-            str: Response generated
+            Union[str, Generator[str, None, None]]: Response generated
         """
-        response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
-        return self.get_message(response)
+        try:
+            response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
+            if stream:
+                for chunk in response:
+                    yield chunk["text"]
+            else:
+                return str(response)
+        except Exception as e:
+            return f"Error: {str(e)}"

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response
@@ -248,6 +261,6 @@ class LearnFast(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = LearnFast()
-    response = ai.chat(input(">>> "), image_path=None)
+    response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
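
The rewritten chat() yields text chunks when stream=True, which is the path the updated __main__ block exercises. Note that because the new body contains a yield statement, Python treats chat() as a generator function even when stream=False, so non-streaming callers now receive a generator object rather than a plain string. A minimal streaming sketch (illustrative only; module path inferred from the file list above):

    # Illustrative sketch only; mirrors the updated __main__ block.
    from webscout.Provider.learnfastai import LearnFast

    ai = LearnFast()
    # stream=True turns chat() into a generator of text chunks.
    for chunk in ai.chat("Give me one study tip.", stream=True):
        print(chunk, end="", flush=True)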
webscout/Provider/llamatutor.py
@@ -12,7 +12,7 @@ class LlamaTutor(Provider):
     """
     A class to interact with the LlamaTutor API (Together.ai)
     """
-
+    AVAILABLE_MODELS = ["UNKNOWN"]
     def __init__(
         self,
         is_conversation: bool = True,
webscout/Provider/llmchat.py
@@ -18,9 +18,9 @@ class LLMChat(Provider):
         "@cf/meta/llama-3.1-70b-instruct",
         "@cf/meta/llama-3.1-8b-instruct",
         "@cf/meta/llama-3.2-3b-instruct",
-        "@cf/meta/llama-3.2-1b-instruct"
-        "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
-        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b"
+        "@cf/meta/llama-3.2-1b-instruct",
+        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
     ]

     def __init__(
@@ -184,8 +184,30 @@ class LLMChat(Provider):
         return response["text"]

 if __name__ == "__main__":
-    from rich import print
-    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
-    response = ai.chat("What's the meaning of life?", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(LLMChat.AVAILABLE_MODELS)
+
+    for model in LLMChat.AVAILABLE_MODELS:
+        try:
+            test_ai = LLMChat(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/multichat.py
@@ -66,6 +66,45 @@ MODEL_CONFIGS = {
 }

 class MultiChatAI(Provider):
+    """
+    A class to interact with the MultiChatAI API.
+    """
+    AVAILABLE_MODELS = [
+        # Llama Models
+        "llama-3.3-70b-versatile",
+        "llama-3.2-11b-vision-preview",
+        "deepseek-r1-distill-llama-70b",
+
+        # Cohere Models
+        # "command-r", >>>> NOT WORKING
+        # "command", >>>> NOT WORKING
+
+        # Google Models
+        # "gemini-1.5-flash-002", >>>> NOT WORKING
+        "gemma2-9b-it",
+        "gemini-2.0-flash",
+
+        # DeepInfra Models
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Gryphe/MythoMax-L2-13b",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "deepseek-ai/DeepSeek-V3",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        # "gemma-2-27b-it", >>>> NOT WORKING
+
+        # Mistral Models
+        # "mistral-small-latest", >>>> NOT WORKING
+        # "codestral-latest", >>>> NOT WORKING
+        # "open-mistral-7b", >>>> NOT WORKING
+        # "open-mixtral-8x7b", >>>> NOT WORKING
+
+        # Alibaba Models
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/QwQ-32B-Preview"
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
@@ -85,6 +124,8 @@ class MultiChatAI(Provider):
         top_p: float = 1
     ):
         """Initializes the MultiChatAI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -258,12 +299,27 @@ class MultiChatAI(Provider):
         return str(response)

 if __name__ == "__main__":
-    from rich import print
-
-    # Example usage
-    ai = MultiChatAI(model="Qwen/QwQ-32B-Preview")
-    try:
-        response = ai.chat("What is quantum computing?")
-        print(response)
-    except Exception as e:
-        print(f"Error: {str(e)}")
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(MultiChatAI.AVAILABLE_MODELS)
+
+    for model in MultiChatAI.AVAILABLE_MODELS:
+        try:
+            test_ai = MultiChatAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")