webscout 7.4__py3-none-any.whl → 7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.
Files changed (137)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +432 -0
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/Cloudflare.py +18 -21
  24. webscout/Provider/DeepSeek.py +27 -48
  25. webscout/Provider/Deepinfra.py +129 -53
  26. webscout/Provider/Gemini.py +1 -1
  27. webscout/Provider/GithubChat.py +362 -0
  28. webscout/Provider/Glider.py +25 -8
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +38 -5
  31. webscout/Provider/HuggingFaceChat.py +462 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/Marcus.py +7 -50
  34. webscout/Provider/Netwrck.py +43 -67
  35. webscout/Provider/PI.py +4 -2
  36. webscout/Provider/Perplexitylabs.py +26 -6
  37. webscout/Provider/Phind.py +29 -3
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  51. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  52. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  53. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  54. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  55. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  56. webscout/Provider/TTS/__init__.py +2 -2
  57. webscout/Provider/TTS/deepgram.py +12 -39
  58. webscout/Provider/TTS/elevenlabs.py +14 -40
  59. webscout/Provider/TTS/gesserit.py +11 -35
  60. webscout/Provider/TTS/murfai.py +13 -39
  61. webscout/Provider/TTS/parler.py +17 -40
  62. webscout/Provider/TTS/speechma.py +180 -0
  63. webscout/Provider/TTS/streamElements.py +17 -44
  64. webscout/Provider/TextPollinationsAI.py +39 -59
  65. webscout/Provider/Venice.py +217 -200
  66. webscout/Provider/WiseCat.py +27 -5
  67. webscout/Provider/Youchat.py +63 -36
  68. webscout/Provider/__init__.py +13 -8
  69. webscout/Provider/akashgpt.py +28 -10
  70. webscout/Provider/copilot.py +416 -0
  71. webscout/Provider/flowith.py +196 -0
  72. webscout/Provider/freeaichat.py +32 -45
  73. webscout/Provider/granite.py +17 -53
  74. webscout/Provider/koala.py +20 -5
  75. webscout/Provider/llamatutor.py +7 -47
  76. webscout/Provider/llmchat.py +36 -53
  77. webscout/Provider/multichat.py +92 -98
  78. webscout/Provider/talkai.py +1 -0
  79. webscout/Provider/turboseek.py +3 -0
  80. webscout/Provider/tutorai.py +2 -0
  81. webscout/Provider/typegpt.py +154 -64
  82. webscout/Provider/x0gpt.py +3 -1
  83. webscout/Provider/yep.py +102 -20
  84. webscout/__init__.py +3 -0
  85. webscout/cli.py +4 -40
  86. webscout/conversation.py +1 -10
  87. webscout/exceptions.py +19 -9
  88. webscout/litagent/__init__.py +2 -2
  89. webscout/litagent/agent.py +351 -20
  90. webscout/litagent/constants.py +34 -5
  91. webscout/litprinter/__init__.py +0 -3
  92. webscout/models.py +181 -0
  93. webscout/optimizers.py +1 -1
  94. webscout/prompt_manager.py +2 -8
  95. webscout/scout/core/scout.py +1 -4
  96. webscout/scout/core/search_result.py +1 -1
  97. webscout/scout/core/text_utils.py +1 -1
  98. webscout/scout/core.py +2 -5
  99. webscout/scout/element.py +1 -1
  100. webscout/scout/parsers/html_parser.py +1 -1
  101. webscout/scout/utils.py +0 -1
  102. webscout/swiftcli/__init__.py +1 -3
  103. webscout/tempid.py +1 -1
  104. webscout/update_checker.py +55 -95
  105. webscout/version.py +1 -1
  106. webscout/webscout_search_async.py +1 -2
  107. webscout/yep_search.py +297 -297
  108. webscout-7.6.dist-info/LICENSE.md +146 -0
  109. {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
  110. {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
  111. webscout/Extra/autollama.py +0 -231
  112. webscout/Local/__init__.py +0 -10
  113. webscout/Local/_version.py +0 -3
  114. webscout/Local/formats.py +0 -747
  115. webscout/Local/model.py +0 -1368
  116. webscout/Local/samplers.py +0 -125
  117. webscout/Local/thread.py +0 -539
  118. webscout/Local/ui.py +0 -401
  119. webscout/Local/utils.py +0 -388
  120. webscout/Provider/Amigo.py +0 -274
  121. webscout/Provider/Bing.py +0 -243
  122. webscout/Provider/DiscordRocks.py +0 -253
  123. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  124. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  125. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  126. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  127. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  128. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  129. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  130. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  131. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  132. webscout/Provider/TTS/voicepod.py +0 -117
  133. webscout/Provider/dgaf.py +0 -214
  134. webscout-7.4.dist-info/LICENSE.md +0 -211
  135. {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  136. {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  137. {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0

webscout/Provider/llamatutor.py

@@ -1,4 +1,3 @@
-
 import requests
 import json
 
@@ -8,13 +7,12 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
-from webscout.Litlogger import Logger, LogFormat
 
 class LlamaTutor(Provider):
     """
-    A class to interact with the LlamaTutor API (Together.ai) with comprehensive logging.
+    A class to interact with the LlamaTutor API (Together.ai)
     """
-
+    AVAILABLE_MODELS = ["UNKNOWN"]
     def __init__(
         self,
         is_conversation: bool = True,
@@ -26,20 +24,11 @@ class LlamaTutor(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False
+        system_prompt: str = "You are a helpful AI assistant."
     ):
         """
-        Initializes the LlamaTutor API with given parameters and logging capabilities.
+        Initializes the LlamaTutor API with given parameters.
         """
-        self.logger = Logger(
-            name="LlamaTutor",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info("Initializing LlamaTutor API")
 
         self.session = requests.Session()
         self.is_conversation = is_conversation
@@ -74,9 +63,6 @@ class LlamaTutor(Provider):
         )
 
         self.session.headers.update(self.headers)
-
-        if self.logger:
-            self.logger.debug("Headers configured and session updated")
 
         Conversation.intro = (
             AwesomePrompts().get_act(
@@ -92,9 +78,6 @@ class LlamaTutor(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        if self.logger:
-            self.logger.info("LlamaTutor initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -103,10 +86,7 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with LlamaTutor with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+        """Chat with LlamaTutor"""
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -114,11 +94,7 @@ class LlamaTutor(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
@@ -136,8 +112,6 @@ class LlamaTutor(Provider):
 
         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")
 
                 response = requests.post(
                     self.api_endpoint,
@@ -148,9 +122,6 @@ class LlamaTutor(Provider):
                 )
                 response.raise_for_status()
 
-                if self.logger:
-                    self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
                 full_response = ''
                 for line in response.iter_lines(decode_unicode=True):
                     if line:
@@ -162,8 +133,6 @@ class LlamaTutor(Provider):
                                 full_response += json_data["text"]
                                 yield json_data["text"] if raw else dict(text=json_data["text"])
                             except json.JSONDecodeError as e:
-                                if self.logger:
-                                    self.logger.warning(f"Failed to parse response line: {e}")
                                 continue
 
                 self.last_response.update(dict(text=full_response))
@@ -172,17 +141,11 @@ class LlamaTutor(Provider):
                 )
 
             except requests.exceptions.HTTPError as http_err:
-                if self.logger:
-                    self.logger.error(f"HTTP error occurred: {http_err}")
                 raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
             except requests.exceptions.RequestException as err:
-                if self.logger:
-                    self.logger.error(f"Request error occurred: {err}")
                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")
 
         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
            for _ in for_stream():
                pass
            return self.last_response
@@ -196,9 +159,7 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+        """Generate response"""
 
         def for_stream():
             for response in self.ask(
@@ -225,8 +186,7 @@ class LlamaTutor(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    # Enable logging for testing
-    ai = LlamaTutor(logging=True)
+    ai = LlamaTutor()
     response = ai.chat("Write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
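
A recurring change in this release, visible in the hunks above, is the removal of the Litlogger-based `logging` keyword from provider constructors. Call sites written against 7.4 that passed `logging=True` will fail under 7.6. A minimal before/after sketch (the module path comes from the file list above; the prompt is taken from the provider's own `__main__` block):

# Migration sketch for the removed `logging` kwarg (webscout 7.4 -> 7.6).
from webscout.Provider.llamatutor import LlamaTutor

# 7.4 accepted: ai = LlamaTutor(logging=True)
# 7.6 removed the parameter, so that call now raises:
#   TypeError: __init__() got an unexpected keyword argument 'logging'
ai = LlamaTutor()
for chunk in ai.chat("Write a poem about AI", stream=True):
    print(chunk, end="", flush=True)
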
webscout/Provider/llmchat.py

@@ -1,4 +1,3 @@
-
 import requests
 import json
 from typing import Any, Dict, Optional, Generator, List
@@ -8,21 +7,20 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
 
 class LLMChat(Provider):
     """
-    A class to interact with the LLMChat API with comprehensive logging.
+    A class to interact with the LLMChat API
     """
 
     AVAILABLE_MODELS = [
         "@cf/meta/llama-3.1-70b-instruct",
         "@cf/meta/llama-3.1-8b-instruct",
         "@cf/meta/llama-3.2-3b-instruct",
-        "@cf/meta/llama-3.2-1b-instruct"
-        "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
-        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b"
+        "@cf/meta/llama-3.2-1b-instruct",
+        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
     ]
@@ -37,23 +35,13 @@ class LLMChat(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "@cf/meta/llama-3.1-70b-instruct",
-        system_prompt: str = "You are a helpful assistant.",
-        logging: bool = False
+        system_prompt: str = "You are a helpful assistant."
     ):
         """
-        Initializes the LLMChat API with given parameters and logging capabilities.
+        Initializes the LLMChat API with given parameters.
         """
-        self.logger = Logger(
-            name="LLMChat",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing LLMChat with model: {model}")
 
         if model not in self.AVAILABLE_MODELS:
-            if self.logger:
-                self.logger.error(f"Invalid model selected: {model}")
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
         self.session = requests.Session()
@@ -93,9 +81,6 @@ class LLMChat(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        if self.logger:
-            self.logger.info("LLMChat initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -105,9 +90,6 @@ class LLMChat(Provider):
         conversationally: bool = False,
     ) -> Dict[str, Any]:
         """Chat with LLMChat with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -115,11 +97,7 @@ class LLMChat(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
@@ -136,14 +114,9 @@ class LLMChat(Provider):
 
         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")
 
                 with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
                     response.raise_for_status()
-
-                    if self.logger:
-                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
 
                     full_response = ""
                     for line in response.iter_lines():
@@ -158,9 +131,7 @@ class LLMChat(Provider):
                                     yield response_text if raw else dict(text=response_text)
                                 except json.JSONDecodeError:
                                     if line.strip() != 'data: [DONE]':
-                                        if self.logger:
-                                            self.logger.warning(f"Failed to parse line: {line}")
-                                        continue
+                                        continue
 
                     self.last_response.update(dict(text=full_response))
                     self.conversation.update_chat_history(
@@ -168,21 +139,14 @@ class LLMChat(Provider):
                     )
 
             except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"API request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
-
+
             full_response = ""
             for line in for_stream():
                 full_response += line['text'] if not raw else line
-
-            if self.logger:
-                self.logger.debug("Response processing completed")
-
+
             return dict(text=full_response)
 
         return for_stream() if stream else for_non_stream()
@@ -195,8 +159,6 @@ class LLMChat(Provider):
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
         """Generate response with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
@@ -222,9 +184,30 @@ class LLMChat(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-    from rich import print
-    # Enable logging for testing
-    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct', logging=True)
-    response = ai.chat("What's the meaning of life?", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(LLMChat.AVAILABLE_MODELS)
+
+    for model in LLMChat.AVAILABLE_MODELS:
+        try:
+            test_ai = LLMChat(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")

webscout/Provider/multichat.py

@@ -6,7 +6,6 @@ from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 
 # Model configurations
@@ -21,14 +20,18 @@ MODEL_CONFIGS = {
     },
     "cohere": {
         "endpoint": "https://www.multichatai.com/api/chat/cohere",
-        "models": {"command-r": {"contextLength": 128000}},
+        "models": {
+            "command-r": {"contextLength": 128000},
+            "command": {"contextLength": 4096},
+        },
     },
     "google": {
         "endpoint": "https://www.multichatai.com/api/chat/google",
         "models": {
             "gemini-1.5-flash-002": {"contextLength": 1048576},
             "gemma2-9b-it": {"contextLength": 8192},
-        },
+            "gemini-2.0-flash": {"contextLength": 128000},
+        },
         "message_format": "parts",
     },
     "deepinfra": {
@@ -38,6 +41,9 @@ MODEL_CONFIGS = {
             "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
             "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
             "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
+            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
+            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
+            "gemma-2-27b-it": {"contextLength": 8192},
         },
     },
     "mistral": {
@@ -49,9 +55,56 @@ MODEL_CONFIGS = {
             "open-mixtral-8x7b": {"contextLength": 8000},
         },
     },
+    "alibaba": {
+        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
+        "models": {
+            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
+            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
+        },
+    },
 }
 
 class MultiChatAI(Provider):
+    """
+    A class to interact with the MultiChatAI API.
+    """
+    AVAILABLE_MODELS = [
+        # Llama Models
+        "llama-3.3-70b-versatile",
+        "llama-3.2-11b-vision-preview",
+        "deepseek-r1-distill-llama-70b",
+
+        # Cohere Models
+        # "command-r", >>>> NOT WORKING
+        # "command", >>>> NOT WORKING
+
+        # Google Models
+        # "gemini-1.5-flash-002", >>>> NOT WORKING
+        "gemma2-9b-it",
+        "gemini-2.0-flash",
+
+        # DeepInfra Models
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Gryphe/MythoMax-L2-13b",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "deepseek-ai/DeepSeek-V3",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        # "gemma-2-27b-it", >>>> NOT WORKING
+
+        # Mistral Models
+        # "mistral-small-latest", >>>> NOT WORKING
+        # "codestral-latest", >>>> NOT WORKING
+        # "open-mistral-7b", >>>> NOT WORKING
+        # "open-mixtral-8x7b", >>>> NOT WORKING
+
+        # Alibaba Models
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/QwQ-32B-Preview"
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
@@ -68,20 +121,11 @@ class MultiChatAI(Provider):
         temperature: float = 0.5,
         presence_penalty: int = 0,
         frequency_penalty: int = 0,
-        top_p: float = 1,
-        logging: bool = False,
+        top_p: float = 1
     ):
-        """Initializes the MultiChatAI API client with logging capabilities."""
-        # Initialize logger first
-        self.logger = Logger(
-            name="MultiChatAI",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.debug("Initializing MultiChatAI")
-
+        """Initializes the MultiChatAI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -106,9 +150,6 @@ class MultiChatAI(Provider):
             "user-agent": self.agent.random(),
         }
 
-        if self.logger:
-            self.logger.debug(f"Setting up session with headers: {self.headers}")
-
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
         self.session.cookies.update({"session": uuid.uuid4().hex})
@@ -131,24 +172,17 @@ class MultiChatAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
-        # Get provider after logger initialization
         self.provider = self._get_provider_from_model(self.model)
         self.model_name = self.model
 
-        if self.logger:
-            self.logger.info(f"MultiChatAI initialized with model: {self.model}")
-
     def _get_endpoint(self) -> str:
         """Get the API endpoint for the current provider."""
-        endpoint = MODEL_CONFIGS[self.provider]["endpoint"]
-        if self.logger:
-            self.logger.debug(f"Using endpoint: {endpoint}")
-        return endpoint
+        return MODEL_CONFIGS[self.provider]["endpoint"]
 
     def _get_chat_settings(self) -> Dict[str, Any]:
         """Get chat settings for the current model."""
         base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
-        settings = {
+        return {
             "model": self.model,
             "prompt": self.system_prompt,
             "temperature": self.temperature,
@@ -157,45 +191,30 @@ class MultiChatAI(Provider):
             "includeWorkspaceInstructions": True,
             "embeddingsProvider": "openai"
         }
-        if self.logger:
-            self.logger.debug(f"Chat settings: {settings}")
-        return settings
 
     def _get_system_message(self) -> str:
         """Generate system message with current date."""
         current_date = datetime.now().strftime("%d/%m/%Y")
-        message = f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
-        if self.logger:
-            self.logger.debug(f"System message: {message}")
-        return message
+        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
 
     def _build_messages(self, conversation_prompt: str) -> list:
         """Build messages array based on provider type."""
         if self.provider == "google":
-            messages = [
+            return [
                 {"role": "user", "parts": self._get_system_message()},
                 {"role": "model", "parts": "I will follow your instructions."},
                 {"role": "user", "parts": conversation_prompt}
             ]
         else:
-            messages = [
+            return [
                 {"role": "system", "content": self._get_system_message()},
                 {"role": "user", "content": conversation_prompt}
             ]
-
-        if self.logger:
-            self.logger.debug(f"Built messages: {messages}")
-        return messages
 
     def _get_provider_from_model(self, model: str) -> str:
         """Determine the provider based on the model name."""
-        if self.logger:
-            self.logger.debug(f"Getting provider for model: {model}")
-
         for provider, config in MODEL_CONFIGS.items():
             if model in config["models"]:
-                if self.logger:
-                    self.logger.info(f"Found provider: {provider} for model: {model}")
                 return provider
 
         available_models = []
@@ -204,16 +223,10 @@ class MultiChatAI(Provider):
                 available_models.append(f"{provider}/{model_name}")
 
         error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
-        if self.logger:
-            self.logger.error(error_msg)
         raise ValueError(error_msg)
 
     def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
-        """Make the API request with proper error handling and logging."""
-        if self.logger:
-            self.logger.debug(f"Making request to endpoint: {self._get_endpoint()}")
-            self.logger.debug(f"Request payload: {json.dumps(payload, indent=2)}")
-
+        """Make the API request with proper error handling."""
         try:
             response = self.session.post(
                 self._get_endpoint(),
@@ -222,15 +235,8 @@ class MultiChatAI(Provider):
                 timeout=self.timeout,
             )
             response.raise_for_status()
-
-            if self.logger:
-                self.logger.info(f"Request successful: {response.status_code}")
-                self.logger.debug(f"Response content: {response.text[:200]}...")
-
             return response
         except requests.exceptions.RequestException as e:
-            if self.logger:
-                self.logger.error(f"Request failed: {str(e)}")
             raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
 
     def ask(
@@ -241,21 +247,14 @@ class MultiChatAI(Provider):
         conversationally: bool = False,
     ) -> Dict[str, Any]:
         """Sends a prompt to the MultiChatAI API and returns the response."""
-        if self.logger:
-            self.logger.debug(f"ask() called with prompt: {prompt}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                if self.logger:
-                    self.logger.info(f"Applying optimizer: {optimizer}")
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
             else:
                 error_msg = f"Optimizer is not one of {self.__available_optimizers}"
-                if self.logger:
-                    self.logger.error(error_msg)
                 raise exceptions.FailedToGenerateResponseError(error_msg)
 
         payload = {
@@ -269,15 +268,8 @@ class MultiChatAI(Provider):
             full_response = response.text.strip()
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-
-            if self.logger:
-                self.logger.info("Successfully processed response")
-                self.logger.debug(f"Final response: {full_response[:200]}...")
-
             return self.last_response
         except json.JSONDecodeError as e:
-            if self.logger:
-                self.logger.error(f"Failed to decode JSON response: {e}")
             raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
 
     def chat(
@@ -286,17 +278,10 @@ class MultiChatAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response with logging."""
-        if self.logger:
-            self.logger.debug(f"chat() called with prompt: {prompt}")
-
+        """Generate response."""
         response = self.ask(
             prompt, optimizer=optimizer, conversationally=conversationally
         )
-
-        if self.logger:
-            self.logger.info("Chat response generated successfully")
-
         return self.get_message(response)
 
     def get_message(self, response: Dict[str, Any] | str) -> str:
@@ -309,23 +294,32 @@ class MultiChatAI(Provider):
         Returns:
             str: The extracted message text
         """
-        if self.logger:
-            self.logger.debug(f"Extracting message from response type: {type(response)}")
-
         if isinstance(response, dict):
-            message = response.get("text", "")
-            if self.logger:
-                self.logger.debug(f"Extracted message from dict: {message[:200]}...")
-            return message
+            return response.get("text", "")
         return str(response)
 
 if __name__ == "__main__":
-    from rich import print
-
-    # Example usage with logging enabled
-    ai = MultiChatAI(model="deepseek-r1-distill-llama-70b", logging=False)
-    try:
-        response = ai.chat("What is quantum computing?")
-        print(response)
-    except Exception as e:
-        print(f"Error: {str(e)}")
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(MultiChatAI.AVAILABLE_MODELS)
+
+    for model in MultiChatAI.AVAILABLE_MODELS:
+        try:
+            test_ai = MultiChatAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/talkai.py

@@ -9,6 +9,7 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+
 class Talkai(Provider):
     """
     A class to interact with the Talkai.info API.