webscout 7.3-py3-none-any.whl → 7.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (62):
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/koala.py

@@ -12,6 +12,11 @@ class KOALA(Provider):
     A class to interact with the Koala.sh API.
     """
 
+    AVAILABLE_MODELS = [
+        "gpt-4o-mini",
+        "gpt-4o",
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
@@ -23,7 +28,7 @@ class KOALA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o-mini",
+        model: str = "gpt-4o",
         web_search: bool = True,
 
     ) -> None:
@@ -44,6 +49,9 @@ class KOALA(Provider):
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
         """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
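
The net effect of these koala.py hunks: 7.5 pins the accepted model names and flips the default from "gpt-4o-mini" to "gpt-4o". A minimal usage sketch against the new guard (the import path is an assumption based on the file list above):

    from webscout.Provider.koala import KOALA  # path assumed

    ai = KOALA()                      # now defaults to "gpt-4o"
    ai = KOALA(model="gpt-4o-mini")   # still accepted
    KOALA(model="gpt-3.5-turbo")      # raises ValueError: Invalid model: ...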
webscout/Provider/llamatutor.py

@@ -1,4 +1,3 @@
-
 import requests
 import json
 
@@ -8,11 +7,10 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
-from webscout.Litlogger import Logger, LogFormat
 
 class LlamaTutor(Provider):
     """
-    A class to interact with the LlamaTutor API (Together.ai) with comprehensive logging.
+    A class to interact with the LlamaTutor API (Together.ai)
     """
 
     def __init__(
@@ -26,20 +24,11 @@ class LlamaTutor(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False
+        system_prompt: str = "You are a helpful AI assistant."
     ):
         """
-        Initializes the LlamaTutor API with given parameters and logging capabilities.
+        Initializes the LlamaTutor API with given parameters.
         """
-        self.logger = Logger(
-            name="LlamaTutor",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info("Initializing LlamaTutor API")
 
         self.session = requests.Session()
         self.is_conversation = is_conversation
@@ -74,9 +63,6 @@ class LlamaTutor(Provider):
         )
 
         self.session.headers.update(self.headers)
-
-        if self.logger:
-            self.logger.debug("Headers configured and session updated")
 
         Conversation.intro = (
             AwesomePrompts().get_act(
@@ -92,9 +78,6 @@ class LlamaTutor(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        if self.logger:
-            self.logger.info("LlamaTutor initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -103,10 +86,7 @@ class LlamaTutor(Provider):
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
-        """Chat with LlamaTutor with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+        """Chat with LlamaTutor"""
 
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
@@ -114,11 +94,7 @@ class LlamaTutor(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
@@ -136,8 +112,6 @@ class LlamaTutor(Provider):
 
         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")
 
                 response = requests.post(
                     self.api_endpoint,
@@ -148,9 +122,6 @@ class LlamaTutor(Provider):
                 )
                 response.raise_for_status()
 
-                if self.logger:
-                    self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
                 full_response = ''
                 for line in response.iter_lines(decode_unicode=True):
                     if line:
@@ -162,8 +133,6 @@ class LlamaTutor(Provider):
                             full_response += json_data["text"]
                             yield json_data["text"] if raw else dict(text=json_data["text"])
                         except json.JSONDecodeError as e:
-                            if self.logger:
-                                self.logger.warning(f"Failed to parse response line: {e}")
                             continue
 
                 self.last_response.update(dict(text=full_response))
@@ -172,17 +141,11 @@ class LlamaTutor(Provider):
                 )
 
             except requests.exceptions.HTTPError as http_err:
-                if self.logger:
-                    self.logger.error(f"HTTP error occurred: {http_err}")
                 raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
             except requests.exceptions.RequestException as err:
-                if self.logger:
-                    self.logger.error(f"Request error occurred: {err}")
                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")
 
         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -196,9 +159,7 @@ class LlamaTutor(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+        """Generate response"""
 
         def for_stream():
             for response in self.ask(
@@ -225,8 +186,7 @@ class LlamaTutor(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    # Enable logging for testing
-    ai = LlamaTutor(logging=True)
+    ai = LlamaTutor()
     response = ai.chat("Write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
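
Taken together, the llamatutor.py hunks strip the Litlogger integration entirely: the Logger import, the logging constructor flag, and every self.logger call are gone. That is a breaking change for callers of the old keyword; a minimal migration sketch (import path assumed from the file list above):

    from webscout.Provider.llamatutor import LlamaTutor  # path assumed

    # webscout 7.3: LlamaTutor(logging=True)
    # webscout 7.5: the keyword no longer exists, so passing it raises TypeError
    ai = LlamaTutor(system_prompt="You are a helpful AI assistant.")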
webscout/Provider/llmchat.py

@@ -1,4 +1,3 @@
-
 import requests
 import json
 from typing import Any, Dict, Optional, Generator, List
@@ -8,12 +7,11 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
 
 class LLMChat(Provider):
     """
-    A class to interact with the LLMChat API with comprehensive logging.
+    A class to interact with the LLMChat API
     """
 
     AVAILABLE_MODELS = [
@@ -37,23 +35,13 @@ class LLMChat(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "@cf/meta/llama-3.1-70b-instruct",
-        system_prompt: str = "You are a helpful assistant.",
-        logging: bool = False
+        system_prompt: str = "You are a helpful assistant."
     ):
         """
-        Initializes the LLMChat API with given parameters and logging capabilities.
+        Initializes the LLMChat API with given parameters.
        """
-        self.logger = Logger(
-            name="LLMChat",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing LLMChat with model: {model}")
 
         if model not in self.AVAILABLE_MODELS:
-            if self.logger:
-                self.logger.error(f"Invalid model selected: {model}")
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
         self.session = requests.Session()
@@ -93,9 +81,6 @@ class LLMChat(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        if self.logger:
-            self.logger.info("LLMChat initialized successfully")
-
     def ask(
         self,
         prompt: str,
@@ -105,9 +90,6 @@ class LLMChat(Provider):
         conversationally: bool = False,
     ) -> Dict[str, Any]:
         """Chat with LLMChat with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -115,11 +97,7 @@ class LLMChat(Provider):
             conversation_prompt = getattr(Optimizers, optimizer)(
                 conversation_prompt if conversationally else prompt
             )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")
         else:
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
             raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
@@ -136,14 +114,9 @@ class LLMChat(Provider):
 
         def for_stream():
             try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")
 
                 with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
                     response.raise_for_status()
-
-                    if self.logger:
-                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
 
                     full_response = ""
                     for line in response.iter_lines():
@@ -158,9 +131,7 @@ class LLMChat(Provider):
                                     yield response_text if raw else dict(text=response_text)
                             except json.JSONDecodeError:
                                 if line.strip() != 'data: [DONE]':
-                                    if self.logger:
-                                        self.logger.warning(f"Failed to parse line: {line}")
-                                    continue
+                                    continue
 
                     self.last_response.update(dict(text=full_response))
                     self.conversation.update_chat_history(
@@ -168,21 +139,14 @@ class LLMChat(Provider):
                     )
 
             except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"API request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
-
+
             full_response = ""
             for line in for_stream():
                 full_response += line['text'] if not raw else line
-
-            if self.logger:
-                self.logger.debug("Response processing completed")
-
+
             return dict(text=full_response)
 
         return for_stream() if stream else for_non_stream()
@@ -195,8 +159,6 @@ class LLMChat(Provider):
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
         """Generate response with logging capabilities"""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
@@ -223,8 +185,7 @@ class LLMChat(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    # Enable logging for testing
-    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct', logging=True)
+    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
     response = ai.chat("What's the meaning of life?", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
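
llmchat.py gets the same Litlogger removal, while the request/response flow is untouched: ask() still returns a dict with a "text" key (or a generator of such dicts when stream=True), and chat() yields plain text chunks, as the __main__ block above demonstrates. A short usage sketch against the 7.5 signature (import path assumed):

    from webscout.Provider.llmchat import LLMChat  # path assumed

    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
    print(ai.ask("Hello")["text"])               # non-streaming: one dict
    for chunk in ai.chat("Hello", stream=True):  # streaming: text chunks
        print(chunk, end="", flush=True)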
webscout/Provider/multichat.py

@@ -6,7 +6,6 @@ from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 
 # Model configurations
@@ -21,14 +20,18 @@ MODEL_CONFIGS = {
     },
     "cohere": {
         "endpoint": "https://www.multichatai.com/api/chat/cohere",
-        "models": {"command-r": {"contextLength": 128000}},
+        "models": {
+            "command-r": {"contextLength": 128000},
+            "command": {"contextLength": 4096},
+        },
     },
     "google": {
         "endpoint": "https://www.multichatai.com/api/chat/google",
         "models": {
             "gemini-1.5-flash-002": {"contextLength": 1048576},
             "gemma2-9b-it": {"contextLength": 8192},
-        },
+            "gemini-2.0-flash": {"contextLength": 128000},
+        },
         "message_format": "parts",
     },
     "deepinfra": {
@@ -38,6 +41,9 @@ MODEL_CONFIGS = {
             "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
             "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
             "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
+            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
+            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
+            "gemma-2-27b-it": {"contextLength": 8192},
         },
     },
     "mistral": {
@@ -49,6 +55,14 @@ MODEL_CONFIGS = {
             "open-mixtral-8x7b": {"contextLength": 8000},
         },
     },
+    "alibaba": {
+        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
+        "models": {
+            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
+            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
+        },
+    },
 }
 
 class MultiChatAI(Provider):
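
The new "command", "gemini-2.0-flash", DeepInfra, and "alibaba" entries all flow through the existing lookup: _get_provider_from_model() (shown further down) scans MODEL_CONFIGS for the first provider whose "models" dict contains the requested name. A standalone sketch of that lookup against the structure above:

    def provider_for(model: str) -> str:
        # Mirrors MultiChatAI._get_provider_from_model: returns the first
        # provider whose "models" mapping contains the requested model name.
        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                return provider
        raise ValueError(f"Invalid model: {model}")

    provider_for("Qwen/QwQ-32B-Preview")   # -> "alibaba" (new in 7.5)
    provider_for("command")                # -> "cohere"  (new in 7.5)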
@@ -68,20 +82,9 @@ class MultiChatAI(Provider):
         temperature: float = 0.5,
         presence_penalty: int = 0,
         frequency_penalty: int = 0,
-        top_p: float = 1,
-        logging: bool = False,
+        top_p: float = 1
     ):
-        """Initializes the MultiChatAI API client with logging capabilities."""
-        # Initialize logger first
-        self.logger = Logger(
-            name="MultiChatAI",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.debug("Initializing MultiChatAI")
-
+        """Initializes the MultiChatAI API client."""
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -106,9 +109,6 @@ class MultiChatAI(Provider):
             "user-agent": self.agent.random(),
         }
 
-        if self.logger:
-            self.logger.debug(f"Setting up session with headers: {self.headers}")
-
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
         self.session.cookies.update({"session": uuid.uuid4().hex})
@@ -131,24 +131,17 @@ class MultiChatAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
-        # Get provider after logger initialization
         self.provider = self._get_provider_from_model(self.model)
         self.model_name = self.model
 
-        if self.logger:
-            self.logger.info(f"MultiChatAI initialized with model: {self.model}")
-
     def _get_endpoint(self) -> str:
         """Get the API endpoint for the current provider."""
-        endpoint = MODEL_CONFIGS[self.provider]["endpoint"]
-        if self.logger:
-            self.logger.debug(f"Using endpoint: {endpoint}")
-        return endpoint
+        return MODEL_CONFIGS[self.provider]["endpoint"]
 
     def _get_chat_settings(self) -> Dict[str, Any]:
         """Get chat settings for the current model."""
         base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
-        settings = {
+        return {
             "model": self.model,
             "prompt": self.system_prompt,
             "temperature": self.temperature,
@@ -157,45 +150,30 @@ class MultiChatAI(Provider):
             "includeWorkspaceInstructions": True,
             "embeddingsProvider": "openai"
         }
-        if self.logger:
-            self.logger.debug(f"Chat settings: {settings}")
-        return settings
 
     def _get_system_message(self) -> str:
         """Generate system message with current date."""
         current_date = datetime.now().strftime("%d/%m/%Y")
-        message = f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
-        if self.logger:
-            self.logger.debug(f"System message: {message}")
-        return message
+        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
 
     def _build_messages(self, conversation_prompt: str) -> list:
         """Build messages array based on provider type."""
         if self.provider == "google":
-            messages = [
+            return [
                 {"role": "user", "parts": self._get_system_message()},
                 {"role": "model", "parts": "I will follow your instructions."},
                 {"role": "user", "parts": conversation_prompt}
             ]
         else:
-            messages = [
+            return [
                 {"role": "system", "content": self._get_system_message()},
                 {"role": "user", "content": conversation_prompt}
             ]
-
-        if self.logger:
-            self.logger.debug(f"Built messages: {messages}")
-        return messages
 
     def _get_provider_from_model(self, model: str) -> str:
         """Determine the provider based on the model name."""
-        if self.logger:
-            self.logger.debug(f"Getting provider for model: {model}")
-
         for provider, config in MODEL_CONFIGS.items():
             if model in config["models"]:
-                if self.logger:
-                    self.logger.info(f"Found provider: {provider} for model: {model}")
                 return provider
 
         available_models = []
@@ -204,16 +182,10 @@ class MultiChatAI(Provider):
                 available_models.append(f"{provider}/{model_name}")
 
         error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
-        if self.logger:
-            self.logger.error(error_msg)
         raise ValueError(error_msg)
 
     def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
-        """Make the API request with proper error handling and logging."""
-        if self.logger:
-            self.logger.debug(f"Making request to endpoint: {self._get_endpoint()}")
-            self.logger.debug(f"Request payload: {json.dumps(payload, indent=2)}")
-
+        """Make the API request with proper error handling."""
         try:
             response = self.session.post(
                 self._get_endpoint(),
@@ -222,15 +194,8 @@ class MultiChatAI(Provider):
                 timeout=self.timeout,
             )
             response.raise_for_status()
-
-            if self.logger:
-                self.logger.info(f"Request successful: {response.status_code}")
-                self.logger.debug(f"Response content: {response.text[:200]}...")
-
             return response
         except requests.exceptions.RequestException as e:
-            if self.logger:
-                self.logger.error(f"Request failed: {str(e)}")
             raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
 
     def ask(
@@ -241,21 +206,14 @@ class MultiChatAI(Provider):
         conversationally: bool = False,
     ) -> Dict[str, Any]:
         """Sends a prompt to the MultiChatAI API and returns the response."""
-        if self.logger:
-            self.logger.debug(f"ask() called with prompt: {prompt}")
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                if self.logger:
-                    self.logger.info(f"Applying optimizer: {optimizer}")
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
             else:
                 error_msg = f"Optimizer is not one of {self.__available_optimizers}"
-                if self.logger:
-                    self.logger.error(error_msg)
                 raise exceptions.FailedToGenerateResponseError(error_msg)
 
         payload = {
@@ -269,15 +227,8 @@ class MultiChatAI(Provider):
             full_response = response.text.strip()
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-
-            if self.logger:
-                self.logger.info("Successfully processed response")
-                self.logger.debug(f"Final response: {full_response[:200]}...")
-
             return self.last_response
         except json.JSONDecodeError as e:
-            if self.logger:
-                self.logger.error(f"Failed to decode JSON response: {e}")
             raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
 
     def chat(
@@ -286,17 +237,10 @@ class MultiChatAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response with logging."""
-        if self.logger:
-            self.logger.debug(f"chat() called with prompt: {prompt}")
-
+        """Generate response."""
         response = self.ask(
             prompt, optimizer=optimizer, conversationally=conversationally
         )
-
-        if self.logger:
-            self.logger.info("Chat response generated successfully")
-
         return self.get_message(response)
 
     def get_message(self, response: Dict[str, Any] | str) -> str:
@@ -309,21 +253,15 @@ class MultiChatAI(Provider):
         Returns:
             str: The extracted message text
         """
-        if self.logger:
-            self.logger.debug(f"Extracting message from response type: {type(response)}")
-
         if isinstance(response, dict):
-            message = response.get("text", "")
-            if self.logger:
-                self.logger.debug(f"Extracted message from dict: {message[:200]}...")
-            return message
+            return response.get("text", "")
         return str(response)
 
 if __name__ == "__main__":
     from rich import print
 
-    # Example usage with logging enabled
-    ai = MultiChatAI(model="deepseek-r1-distill-llama-70b", logging=False)
+    # Example usage
+    ai = MultiChatAI(model="Qwen/QwQ-32B-Preview")
     try:
         response = ai.chat("What is quantum computing?")
         print(response)