webscout 7.0-py3-none-any.whl → 7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -1,41 +1,57 @@
  import requests
  import json
- from typing import Any, Dict, Optional, Generator
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
+ import uuid
+ from typing import Any, Dict
+ from datetime import datetime
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
+ from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+ from webscout.litagent import LitAgent

  # Model configurations
  MODEL_CONFIGS = {
      "llama": {
          "endpoint": "https://www.multichatai.com/api/chat/meta",
          "models": {
-             "llama-3.1-70b-versatile": {"contextLength": 8192},
-             "llama-3.2-90b-vision-preview": {"contextLength": 32768},
+             "llama-3.3-70b-versatile": {"contextLength": 131072},
              "llama-3.2-11b-vision-preview": {"contextLength": 32768},
-         },
-     },
-     "alibaba": {
-         "endpoint": "https://www.multichatai.com/api/chat/alibaba",
-         "models": {
-             "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
-             "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+             "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
          },
      },
      "cohere": {
          "endpoint": "https://www.multichatai.com/api/chat/cohere",
          "models": {"command-r": {"contextLength": 128000}},
      },
+     "google": {
+         "endpoint": "https://www.multichatai.com/api/chat/google",
+         "models": {
+             "gemini-1.5-flash-002": {"contextLength": 1048576},
+             "gemma2-9b-it": {"contextLength": 8192},
+         },
+         "message_format": "parts",
+     },
+     "deepinfra": {
+         "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
+         "models": {
+             "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
+             "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
+             "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
+             "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
+         },
+     },
+     "mistral": {
+         "endpoint": "https://www.multichatai.com/api/chat/mistral",
+         "models": {
+             "mistral-small-latest": {"contextLength": 32000},
+             "codestral-latest": {"contextLength": 32000},
+             "open-mistral-7b": {"contextLength": 8000},
+             "open-mixtral-8x7b": {"contextLength": 8000},
+         },
+     },
  }

  class MultiChatAI(Provider):
-     """
-     A class to interact with the MultiChatAI API.
-     """
-
      def __init__(
          self,
          is_conversation: bool = True,
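
Note: 7.1 drops the entire "alibaba" block and adds "google", "deepinfra", and "mistral" providers, so the set of valid model names changes along with the routing. Below is a minimal sketch of flattening the provider/model pairs the same way the new _get_provider_from_model does for its error message; the helper name list_models is illustrative, not part of the package.

    from typing import Any, Dict, List

    def list_models(model_configs: Dict[str, Any]) -> List[str]:
        # Flatten a MODEL_CONFIGS-style mapping into "provider/model" strings,
        # the same shape used in the ValueError message further down this diff.
        return [
            f"{provider}/{model}"
            for provider, config in model_configs.items()
            for model in config["models"]
        ]

    # Against the 7.1 table this now includes e.g. "mistral/codestral-latest"
    # and contains no "alibaba/..." entries.
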
@@ -47,14 +63,25 @@ class MultiChatAI(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "llama-3.1-70b-versatile",  # Default model
-         system_prompt: str = "You are a helpful assistant.",
+         model: str = "llama-3.3-70b-versatile",
+         system_prompt: str = "You are a friendly, helpful AI assistant.",
          temperature: float = 0.5,
          presence_penalty: int = 0,
          frequency_penalty: int = 0,
          top_p: float = 1,
+         logging: bool = False,
      ):
-         """Initializes the MultiChatAI API client."""
+         """Initializes the MultiChatAI API client with logging capabilities."""
+         # Initialize logger first
+         self.logger = LitLogger(
+             name="MultiChatAI",
+             format=LogFormat.MODERN_EMOJI,
+             color_scheme=ColorScheme.CYBERPUNK
+         ) if logging else None
+
+         if self.logger:
+             self.logger.debug("Initializing MultiChatAI")
+
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
@@ -66,22 +93,31 @@ class MultiChatAI(Provider):
          self.presence_penalty = presence_penalty
          self.frequency_penalty = frequency_penalty
          self.top_p = top_p
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+
          self.headers = {
              "accept": "*/*",
              "accept-language": "en-US,en;q=0.9",
              "content-type": "text/plain;charset=UTF-8",
              "origin": "https://www.multichatai.com",
              "referer": "https://www.multichatai.com/",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+             "user-agent": self.agent.random(),
          }
+
+         if self.logger:
+             self.logger.debug(f"Setting up session with headers: {self.headers}")
+
          self.session.headers.update(self.headers)
          self.session.proxies = proxies
+         self.session.cookies.update({"session": uuid.uuid4().hex})

          self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
+             method for method in dir(Optimizers)
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
+
          Conversation.intro = (
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
@@ -89,41 +125,30 @@ class MultiChatAI(Provider):
              if act
              else intro or Conversation.intro
          )
+
          self.conversation = Conversation(
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset

-         # Parse provider and model name
-         self.provider = "llama"  # Default provider
+         # Get provider after logger initialization
+         self.provider = self._get_provider_from_model(self.model)
          self.model_name = self.model
-
-         # Check if model exists in any provider
-         model_found = False
-         for provider, config in MODEL_CONFIGS.items():
-             if self.model in config["models"]:
-                 self.provider = provider
-                 self.model_name = self.model
-                 model_found = True
-                 break
-
-         if not model_found:
-             available_models = []
-             for provider, config in MODEL_CONFIGS.items():
-                 for model in config["models"].keys():
-                     available_models.append(f"{provider}/{model}")
-             raise ValueError(
-                 f"Invalid model: {self.model}\nAvailable models: {', '.join(available_models)}"
-             )
+
+         if self.logger:
+             self.logger.info(f"MultiChatAI initialized with model: {self.model}")

      def _get_endpoint(self) -> str:
          """Get the API endpoint for the current provider."""
-         return MODEL_CONFIGS[self.provider]["endpoint"]
+         endpoint = MODEL_CONFIGS[self.provider]["endpoint"]
+         if self.logger:
+             self.logger.debug(f"Using endpoint: {endpoint}")
+         return endpoint

      def _get_chat_settings(self) -> Dict[str, Any]:
          """Get chat settings for the current model."""
          base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
-         return {
+         settings = {
              "model": self.model,
              "prompt": self.system_prompt,
              "temperature": self.temperature,
@@ -132,99 +157,175 @@ class MultiChatAI(Provider):
              "includeWorkspaceInstructions": True,
              "embeddingsProvider": "openai"
          }
+         if self.logger:
+             self.logger.debug(f"Chat settings: {settings}")
+         return settings
+
+     def _get_system_message(self) -> str:
+         """Generate system message with current date."""
+         current_date = datetime.now().strftime("%d/%m/%Y")
+         message = f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
+         if self.logger:
+             self.logger.debug(f"System message: {message}")
+         return message
+
+     def _build_messages(self, conversation_prompt: str) -> list:
+         """Build messages array based on provider type."""
+         if self.provider == "google":
+             messages = [
+                 {"role": "user", "parts": self._get_system_message()},
+                 {"role": "model", "parts": "I will follow your instructions."},
+                 {"role": "user", "parts": conversation_prompt}
+             ]
+         else:
+             messages = [
+                 {"role": "system", "content": self._get_system_message()},
+                 {"role": "user", "content": conversation_prompt}
+             ]
+
+         if self.logger:
+             self.logger.debug(f"Built messages: {messages}")
+         return messages
+
+     def _get_provider_from_model(self, model: str) -> str:
+         """Determine the provider based on the model name."""
+         if self.logger:
+             self.logger.debug(f"Getting provider for model: {model}")
+
+         for provider, config in MODEL_CONFIGS.items():
+             if model in config["models"]:
+                 if self.logger:
+                     self.logger.info(f"Found provider: {provider} for model: {model}")
+                 return provider
+
+         available_models = []
+         for provider, config in MODEL_CONFIGS.items():
+             for model_name in config["models"].keys():
+                 available_models.append(f"{provider}/{model_name}")
+
+         error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
+         if self.logger:
+             self.logger.error(error_msg)
+         raise ValueError(error_msg)
+
+     def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
+         """Make the API request with proper error handling and logging."""
+         if self.logger:
+             self.logger.debug(f"Making request to endpoint: {self._get_endpoint()}")
+             self.logger.debug(f"Request payload: {json.dumps(payload, indent=2)}")
+
+         try:
+             response = self.session.post(
+                 self._get_endpoint(),
+                 headers=self.headers,
+                 json=payload,
+                 timeout=self.timeout,
+             )
+             response.raise_for_status()
+
+             if self.logger:
+                 self.logger.info(f"Request successful: {response.status_code}")
+                 self.logger.debug(f"Response content: {response.text[:200]}...")
+
+             return response
+         except requests.exceptions.RequestException as e:
+             if self.logger:
+                 self.logger.error(f"Request failed: {str(e)}")
+             raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e

      def ask(
          self,
          prompt: str,
-         stream: bool = False,
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> Dict[str, Any] | Generator:
+     ) -> Dict[str, Any]:
          """Sends a prompt to the MultiChatAI API and returns the response."""
+         if self.logger:
+             self.logger.debug(f"ask() called with prompt: {prompt}")
+
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
+                 if self.logger:
+                     self.logger.info(f"Applying optimizer: {optimizer}")
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
              else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
+                 error_msg = f"Optimizer is not one of {self.__available_optimizers}"
+                 if self.logger:
+                     self.logger.error(error_msg)
+                 raise exceptions.FailedToGenerateResponseError(error_msg)

          payload = {
              "chatSettings": self._get_chat_settings(),
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt},
-             ],
+             "messages": self._build_messages(conversation_prompt),
              "customModelId": "",
          }

+         response = self._make_request(payload)
          try:
-             response = self.session.post(
-                 self._get_endpoint(),
-                 headers=self.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self.timeout,
-             )
-             response.raise_for_status()
-
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     decoded_line = line.decode("utf-8")
-                     if stream:
-                         yield {"text": decoded_line}
-                     full_response += decoded_line
-
-             self.last_response = {"text": full_response.strip()}
-             self.conversation.update_chat_history(prompt, full_response.strip())
+             full_response = response.text.strip()
+             self.last_response = {"text": full_response}
+             self.conversation.update_chat_history(prompt, full_response)

-             if not stream:
-                 return self.last_response
-
-         except requests.exceptions.RequestException as e:
-             raise exceptions.ProviderConnectionError(f"API request failed: {e}") from e
+             if self.logger:
+                 self.logger.info("Successfully processed response")
+                 self.logger.debug(f"Final response: {full_response[:200]}...")
+
+             return self.last_response
          except json.JSONDecodeError as e:
-             raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}") from e
-         except Exception as e:
-             raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {e}") from e
+             if self.logger:
+                 self.logger.error(f"Failed to decode JSON response: {e}")
+             raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e

      def chat(
          self,
          prompt: str,
-         stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str | Generator[str, None, None]:
-         """Generate response."""
-         if stream:
-             for chunk in self.ask(
-                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 if isinstance(chunk, dict):
-                     yield chunk.get("text", "")
-                 else:
-                     yield str(chunk)
-         else:
-             response = self.ask(
-                 prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-             )
-             return response.get("text", "") if isinstance(response, dict) else str(response)
+     ) -> str:
+         """Generate response with logging."""
+         if self.logger:
+             self.logger.debug(f"chat() called with prompt: {prompt}")
+
+         response = self.ask(
+             prompt, optimizer=optimizer, conversationally=conversationally
+         )
+
+         if self.logger:
+             self.logger.info("Chat response generated successfully")
+
+         return self.get_message(response)

      def get_message(self, response: Dict[str, Any] | str) -> str:
-         """Retrieves message from response."""
+         """
+         Retrieves message from response.
+
+         Args:
+             response (Union[Dict[str, Any], str]): The response to extract the message from
+
+         Returns:
+             str: The extracted message text
+         """
+         if self.logger:
+             self.logger.debug(f"Extracting message from response type: {type(response)}")
+
          if isinstance(response, dict):
-             return response.get("text", "")
+             message = response.get("text", "")
+             if self.logger:
+                 self.logger.debug(f"Extracted message from dict: {message[:200]}...")
+             return message
          return str(response)

  if __name__ == "__main__":
      from rich import print

-     ai = MultiChatAI(model="llama-3.1-70b-versatile")
-     response = ai.chat("What is the meaning of life?", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
+     # Example usage with logging enabled
+     ai = MultiChatAI(model="deepseek-r1-distill-llama-70b", logging=False)
+     try:
+         response = ai.chat("What is quantum computing?")
+         print(response)
+     except Exception as e:
+         print(f"Error: {str(e)}")
@@ -1,6 +1,7 @@
  import requests
  import json
  from typing import *
+ import requests.exceptions

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
@@ -8,9 +9,10 @@ from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout.litagent import LitAgent
+
  class TypeGPT(Provider):
      """
-     A class to interact with the TypeGPT.net API. Improved to match webscout standards.
+     A class to interact with the TypeGPT.net API. Improved to match webscout standards.
      """
      url = "https://chat.typegpt.net"
      working = True
@@ -19,6 +21,7 @@ class TypeGPT(Provider):
      models = [
          # OpenAI Models
          "gpt-3.5-turbo",
+         "chatgpt-4o-latest",
          "gpt-3.5-turbo-202201",
          "gpt-4o",
          "gpt-4o-2024-05-13",
@@ -184,7 +187,7 @@ class TypeGPT(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "claude-3-5-sonnet-20240620",
+         model: str = "gpt-4o",
          system_prompt: str = "You are a helpful assistant.",
          temperature: float = 0.5,
          presence_penalty: int = 0,
@@ -217,7 +220,6 @@ class TypeGPT(Provider):
              "user-agent": LitAgent().random()
          }

-
          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
@@ -230,9 +232,7 @@ class TypeGPT(Provider):
              if act
              else intro or Conversation.intro
          )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
+         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies

@@ -256,7 +256,6 @@ class TypeGPT(Provider):
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )

-
          payload = {
              "messages": [
                  {"role": "system", "content": self.system_prompt},
@@ -270,10 +269,17 @@ class TypeGPT(Provider):
              "top_p": self.top_p,
              "max_tokens": self.max_tokens_to_sample,
          }
+
          def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
-             )
+             try:
+                 response = self.session.post(
+                     self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+                 )
+             except requests.exceptions.ConnectionError as ce:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+                 ) from ce
+
              if not response.ok:
                  raise exceptions.FailedToGenerateResponseError(
                      f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
@@ -287,10 +293,8 @@ class TypeGPT(Provider):
                  # Skip [DONE] message
                  if line.strip() == "[DONE]":
                      break
-
                  try:
                      data = json.loads(line)
-
                      # Extract and yield only new content
                      if 'choices' in data and len(data['choices']) > 0:
                          delta = data['choices'][0].get('delta', {})
@@ -300,14 +304,18 @@ class TypeGPT(Provider):
                          # Yield only the new content
                          yield dict(text=new_content) if not raw else new_content
                          self.last_response = dict(text=message_load)
-
                  except json.JSONDecodeError:
                      continue
              self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

          def for_non_stream():
+             try:
+                 response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
+             except requests.exceptions.ConnectionError as ce:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+                 ) from ce

-             response = self.session.post(self.api_endpoint, headers=self.headers, json=payload)
              if not response.ok:
                  raise exceptions.FailedToGenerateResponseError(
                      f"Request failed - {response.status_code}: {response.text}"
@@ -316,10 +324,8 @@ class TypeGPT(Provider):
              self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
              return self.last_response

-
          return for_stream() if stream else for_non_stream()

-
      def chat(
          self,
          prompt: str,
@@ -327,8 +333,7 @@ class TypeGPT(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> str | Generator[str, None, None]:
-         """Generate response `str` or stream."""
-
+         """Generate response string or stream."""
          if stream:
              gen = self.ask(
                  prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -340,18 +345,16 @@ class TypeGPT(Provider):

      def get_message(self, response: Dict[str, Any]) -> str:
          """Retrieves message from response."""
-         if isinstance(response, str): #Handle raw responses
+         if isinstance(response, str):  # Handle raw responses
              return response
          elif isinstance(response, dict):
              assert isinstance(response, dict), "Response should be of dict data-type only"
-             return response.get("text", "") #Extract text from dictionary response
+             return response.get("text", "")  # Extract text from dictionary response
          else:
              raise TypeError("Invalid response type. Expected str or dict.")

-
  if __name__ == "__main__":
-
-     ai = TypeGPT(model="claude-3-5-sonnet-20240620")
+     ai = TypeGPT(model="chatgpt-4o-latest")
      response = ai.chat("hi", stream=True)
      for chunks in response:
          print(chunks, end="", flush=True)
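
Note: TypeGPT keeps its streaming interface; 7.1 only changes the default model (claude-3-5-sonnet-20240620 → gpt-4o), adds chatgpt-4o-latest to the model list, and wraps the HTTP calls so that connection failures raise FailedToGenerateResponseError instead of a bare requests ConnectionError. A quick sketch of the unchanged streaming usage, mirroring the module's own __main__ block (import path assumed):

    # Hypothetical import path; adjust to wherever the package exposes TypeGPT.
    from webscout.Provider import TypeGPT

    ai = TypeGPT()  # default model is now "gpt-4o" in 7.1
    for chunk in ai.chat("hi", stream=True):
        print(chunk, end="", flush=True)
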
webscout/Provider/yep.py CHANGED
@@ -24,7 +24,7 @@ class YEPCHAT(Provider):
          AVAILABLE_MODELS (list): List of available models for the provider.
      """

-     AVAILABLE_MODELS = ["Mixtral-8x7B-Instruct-v0.1"]
+     AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B"]

      def __init__(
          self,
@@ -37,7 +37,7 @@ class YEPCHAT(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "Mixtral-8x7B-Instruct-v0.1",
+         model: str = "DeepSeek-R1-Distill-Qwen-32B",
          temperature: float = 0.6,
          top_p: float = 0.7,
          logging: bool = False,
@@ -257,6 +257,6 @@ if __name__ == "__main__":

      ai = YEPCHAT(logging=False)

-     response = ai.chat("hi", stream=True)
+     response = ai.chat("how many r in 'strawberry'", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
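
Note: YEPCHAT's single supported model is swapped, so 7.0 code pinning model="Mixtral-8x7B-Instruct-v0.1" now targets a name that is no longer in AVAILABLE_MODELS. A hedged migration sketch (import path assumed; not shown in this diff):

    # Hypothetical import path; adjust to wherever the package exposes YEPCHAT.
    from webscout.Provider import YEPCHAT

    # 7.0 pinned "Mixtral-8x7B-Instruct-v0.1"; the only model listed in 7.1 is:
    ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B")
    for chunk in ai.chat("how many r in 'strawberry'", stream=True):
        print(chunk, end="", flush=True)
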
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "7.0"
+ __version__ = "7.1"
  __prog__ = "webscout"