webscout 8.3.4__py3-none-any.whl → 8.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; further details were provided on the original advisory page.

Files changed (98):
  1. webscout/AIutel.py +52 -1016
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
  5. webscout/Provider/AISEARCH/__init__.py +11 -10
  6. webscout/Provider/AISEARCH/felo_search.py +7 -3
  7. webscout/Provider/AISEARCH/scira_search.py +2 -0
  8. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  9. webscout/Provider/Deepinfra.py +13 -1
  10. webscout/Provider/Flowith.py +6 -1
  11. webscout/Provider/GithubChat.py +1 -0
  12. webscout/Provider/GptOss.py +207 -0
  13. webscout/Provider/Kimi.py +445 -0
  14. webscout/Provider/Netwrck.py +3 -6
  15. webscout/Provider/OPENAI/README.md +2 -1
  16. webscout/Provider/OPENAI/TogetherAI.py +12 -8
  17. webscout/Provider/OPENAI/TwoAI.py +94 -1
  18. webscout/Provider/OPENAI/__init__.py +4 -4
  19. webscout/Provider/OPENAI/copilot.py +20 -4
  20. webscout/Provider/OPENAI/deepinfra.py +12 -0
  21. webscout/Provider/OPENAI/e2b.py +60 -8
  22. webscout/Provider/OPENAI/flowith.py +4 -3
  23. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  24. webscout/Provider/OPENAI/gptoss.py +288 -0
  25. webscout/Provider/OPENAI/kimi.py +469 -0
  26. webscout/Provider/OPENAI/netwrck.py +8 -12
  27. webscout/Provider/OPENAI/refact.py +274 -0
  28. webscout/Provider/OPENAI/scirachat.py +4 -0
  29. webscout/Provider/OPENAI/textpollinations.py +11 -10
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/OPENAI/venice.py +1 -0
  32. webscout/Provider/Perplexitylabs.py +163 -147
  33. webscout/Provider/Qodo.py +30 -6
  34. webscout/Provider/TTI/__init__.py +1 -0
  35. webscout/Provider/TTI/bing.py +14 -2
  36. webscout/Provider/TTI/together.py +11 -9
  37. webscout/Provider/TTI/venice.py +368 -0
  38. webscout/Provider/TTS/README.md +0 -1
  39. webscout/Provider/TTS/__init__.py +0 -1
  40. webscout/Provider/TTS/base.py +479 -159
  41. webscout/Provider/TTS/deepgram.py +409 -156
  42. webscout/Provider/TTS/elevenlabs.py +425 -111
  43. webscout/Provider/TTS/freetts.py +317 -140
  44. webscout/Provider/TTS/gesserit.py +192 -128
  45. webscout/Provider/TTS/murfai.py +248 -113
  46. webscout/Provider/TTS/openai_fm.py +347 -129
  47. webscout/Provider/TTS/speechma.py +620 -586
  48. webscout/Provider/TextPollinationsAI.py +11 -10
  49. webscout/Provider/TogetherAI.py +12 -4
  50. webscout/Provider/TwoAI.py +96 -2
  51. webscout/Provider/TypliAI.py +33 -27
  52. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  53. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  54. webscout/Provider/Venice.py +1 -0
  55. webscout/Provider/WiseCat.py +18 -20
  56. webscout/Provider/__init__.py +2 -96
  57. webscout/Provider/cerebras.py +83 -33
  58. webscout/Provider/copilot.py +42 -23
  59. webscout/Provider/scira_chat.py +4 -0
  60. webscout/Provider/toolbaz.py +6 -10
  61. webscout/Provider/typefully.py +1 -11
  62. webscout/__init__.py +3 -15
  63. webscout/auth/__init__.py +19 -4
  64. webscout/auth/api_key_manager.py +189 -189
  65. webscout/auth/auth_system.py +25 -40
  66. webscout/auth/config.py +105 -6
  67. webscout/auth/database.py +377 -22
  68. webscout/auth/models.py +185 -130
  69. webscout/auth/request_processing.py +175 -11
  70. webscout/auth/routes.py +99 -2
  71. webscout/auth/server.py +9 -2
  72. webscout/auth/simple_logger.py +236 -0
  73. webscout/conversation.py +22 -20
  74. webscout/sanitize.py +1078 -0
  75. webscout/scout/README.md +20 -23
  76. webscout/scout/core/crawler.py +125 -38
  77. webscout/scout/core/scout.py +26 -5
  78. webscout/version.py +1 -1
  79. webscout/webscout_search.py +13 -6
  80. webscout/webscout_search_async.py +10 -8
  81. webscout/yep_search.py +13 -5
  82. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
  83. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
  84. webscout/Provider/Glider.py +0 -225
  85. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  86. webscout/Provider/OPENAI/c4ai.py +0 -394
  87. webscout/Provider/OPENAI/glider.py +0 -330
  88. webscout/Provider/OPENAI/typegpt.py +0 -368
  89. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  90. webscout/Provider/TTS/sthir.py +0 -94
  91. webscout/Provider/WritingMate.py +0 -273
  92. webscout/Provider/typegpt.py +0 -284
  93. webscout/Provider/uncovr.py +0 -333
  94. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  95. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
  96. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
  97. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
  98. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,469 @@
1
+ import requests
2
+ import json
3
+ import time
4
+ import uuid
5
+ import random
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ # Import curl_cffi for improved request handling
9
+ from curl_cffi.requests import Session
10
+ from curl_cffi import CurlError
11
+
12
+ # Import base classes and utility structures
13
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
14
+ from .utils import (
15
+ ChatCompletion,
16
+ ChatCompletionChunk,
17
+ Choice,
18
+ ChatCompletionMessage,
19
+ ChoiceDelta,
20
+ CompletionUsage,
21
+ format_prompt,
22
+ get_system_prompt,
23
+ count_tokens
24
+ )
25
+
26
+ # Attempt to import LitAgent, fallback if not available
27
+ try:
28
+ from webscout.litagent import LitAgent
29
+ except ImportError:
30
+ pass
31
+
32
+ from webscout import exceptions
33
+
34
+
35
+ class Completions(BaseCompletions):
36
+ def __init__(self, client: 'Kimi'):
37
+ self._client = client
38
+
39
+ def create(
40
+ self,
41
+ *,
42
+ model: str,
43
+ messages: List[Dict[str, str]],
44
+ max_tokens: Optional[int] = 4000,
45
+ stream: bool = False,
46
+ temperature: Optional[float] = None,
47
+ top_p: Optional[float] = None,
48
+ timeout: Optional[int] = None,
49
+ proxies: Optional[Dict[str, str]] = None,
50
+ **kwargs: Any
51
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
52
+ """
53
+ Create a completion using the Kimi API.
54
+
55
+ Args:
56
+ model: The model to use (k1.5, k2, k1.5-thinking)
57
+ messages: List of message dictionaries with 'role' and 'content'
58
+ max_tokens: Maximum tokens for response
59
+ stream: Whether to stream the response
60
+ temperature: Sampling temperature (not used by Kimi)
61
+ top_p: Top-p sampling (not used by Kimi)
62
+ timeout: Request timeout
63
+ proxies: Proxy configuration
64
+
65
+ Returns:
66
+ ChatCompletion or generator of ChatCompletionChunk
67
+ """
68
+ # Validate model
69
+ if model not in self._client.AVAILABLE_MODELS:
70
+ raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
71
+
72
+ # Ensure authentication and chat creation
73
+ self._client._authenticate()
74
+ self._client._create_chat()
75
+
76
+ # Format messages exactly like the original Kimi.py
77
+ # Use the first user message content directly, no formatting needed
78
+ user_content = ""
79
+ system_content = ""
80
+
81
+ for msg in messages:
82
+ role = msg.get("role", "user")
83
+ content = msg.get("content", "")
84
+ if role == "system":
85
+ system_content = content
86
+ elif role == "user":
87
+ user_content = content
88
+
89
+ # If we have system content, prepend it to user content
90
+ if system_content:
91
+ final_content = f"{system_content}\n\n{user_content}"
92
+ else:
93
+ final_content = user_content
94
+
95
+ # Create payload exactly like the original Kimi.py
96
+ payload = {
97
+ "kimiplus_id": "kimi",
98
+ "extend": {"sidebar": True},
99
+ "model": model,
100
+ "use_search": self._client.web_search,
101
+ "messages": [
102
+ {
103
+ "role": "user",
104
+ "content": final_content
105
+ }
106
+ ],
107
+ "refs": [],
108
+ "history": [],
109
+ "scene_labels": [],
110
+ "use_semantic_memory": False,
111
+ "use_deep_research": False
112
+ }
113
+
114
+ # Generate request ID and timestamp
115
+ request_id = f"chatcmpl-{uuid.uuid4().hex}"
116
+ created_time = int(time.time())
117
+
118
+ if stream:
119
+ return self._create_stream(request_id, created_time, model, messages, payload, timeout, proxies)
120
+ else:
121
+ return self._create_non_stream(request_id, created_time, model, messages, payload, timeout, proxies)
122
+
123
+ def _create_stream(
124
+ self, request_id: str, created_time: int, model: str, messages: List[Dict[str, str]],
125
+ payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
126
+ ) -> Generator[ChatCompletionChunk, None, None]:
127
+ try:
128
+ response = self._client.session.post(
129
+ self._client.chat_completion_endpoint.format(chat_id=self._client.chat_id),
130
+ json=payload,
131
+ stream=True,
132
+ timeout=timeout or self._client.timeout,
133
+ impersonate="chrome110"
134
+ )
135
+ response.raise_for_status()
136
+
137
+ # Calculate prompt tokens using the messages parameter
138
+ prompt_tokens = count_tokens(messages)
139
+ completion_tokens = 0
140
+ total_tokens = prompt_tokens
141
+
142
+ for line in response.iter_lines(decode_unicode=True):
143
+ if line:
144
+ if line.startswith("data: "):
145
+ json_str = line[6:]
146
+ if json_str == "[DONE]":
147
+ break
148
+
149
+ try:
150
+ data = json.loads(json_str)
151
+ if data.get("event") == "cmpl":
152
+ content = data.get("text")
153
+ if content:
154
+ completion_tokens += count_tokens(content)
155
+ total_tokens = prompt_tokens + completion_tokens
156
+
157
+ delta = ChoiceDelta(content=content, role=None, tool_calls=None)
158
+ choice = Choice(index=0, delta=delta, finish_reason=None, logprobs=None)
159
+ chunk = ChatCompletionChunk(
160
+ id=request_id,
161
+ choices=[choice],
162
+ created=created_time,
163
+ model=model,
164
+ system_fingerprint=None
165
+ )
166
+
167
+ # Add usage information
168
+ chunk.usage = {
169
+ "prompt_tokens": prompt_tokens,
170
+ "completion_tokens": completion_tokens,
171
+ "total_tokens": total_tokens,
172
+ "estimated_cost": None
173
+ }
174
+
175
+ yield chunk
176
+ except json.JSONDecodeError:
177
+ continue
178
+
179
+ # Final chunk with finish_reason="stop"
180
+ delta = ChoiceDelta(content=None, role=None, tool_calls=None)
181
+ choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
182
+ chunk = ChatCompletionChunk(
183
+ id=request_id,
184
+ choices=[choice],
185
+ created=created_time,
186
+ model=model,
187
+ system_fingerprint=None
188
+ )
189
+ chunk.usage = {
190
+ "prompt_tokens": prompt_tokens,
191
+ "completion_tokens": completion_tokens,
192
+ "total_tokens": total_tokens,
193
+ "estimated_cost": None
194
+ }
195
+ yield chunk
196
+
197
+ except CurlError as e:
198
+ print(f"Error during Kimi stream request: {e}")
199
+ raise IOError(f"Kimi request failed: {e}") from e
200
+ except Exception as e:
201
+ print(f"Error processing Kimi stream: {e}")
202
+ raise
203
+
204
+ def _create_non_stream(
205
+ self, request_id: str, created_time: int, model: str, messages: List[Dict[str, str]],
206
+ payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
207
+ ) -> ChatCompletion:
208
+ try:
209
+ response = self._client.session.post(
210
+ self._client.chat_completion_endpoint.format(chat_id=self._client.chat_id),
211
+ json=payload,
212
+ timeout=timeout or self._client.timeout,
213
+ impersonate="chrome110",
214
+ stream=True
215
+ )
216
+ response.raise_for_status()
217
+
218
+ # Collect all streaming data
219
+ full_text = ""
220
+ for line in response.iter_lines():
221
+ if line:
222
+ # Decode bytes to string if needed
223
+ if isinstance(line, bytes):
224
+ line = line.decode('utf-8')
225
+ if line.startswith("data: "):
226
+ json_str = line[6:]
227
+ if json_str == "[DONE]":
228
+ break
229
+
230
+ try:
231
+ data = json.loads(json_str)
232
+ if data.get("event") == "cmpl":
233
+ content = data.get("text")
234
+ if content:
235
+ full_text += content
236
+ except json.JSONDecodeError:
237
+ continue
238
+
239
+ # Create the message object
240
+ message = ChatCompletionMessage(
241
+ role="assistant",
242
+ content=full_text
243
+ )
244
+
245
+ # Create the choice object
246
+ choice = Choice(
247
+ index=0,
248
+ message=message,
249
+ finish_reason="stop"
250
+ )
251
+
252
+ # Create usage object with proper token counting
253
+ prompt_tokens = count_tokens(messages)
254
+ completion_tokens = count_tokens(full_text) if full_text else 0
255
+ usage = CompletionUsage(
256
+ prompt_tokens=prompt_tokens,
257
+ completion_tokens=completion_tokens,
258
+ total_tokens=prompt_tokens + completion_tokens
259
+ )
260
+
261
+ # Create the completion object
262
+ completion = ChatCompletion(
263
+ id=request_id,
264
+ choices=[choice],
265
+ created=created_time,
266
+ model=model,
267
+ usage=usage,
268
+ )
269
+ return completion
270
+
271
+ except CurlError as e:
272
+ print(f"Error during Kimi request: {e}")
273
+ raise IOError(f"Kimi request failed: {e}") from e
274
+ except Exception as e:
275
+ print(f"Error processing Kimi response: {e}")
276
+ raise
277
+
278
+
279
+ class Chat(BaseChat):
280
+ def __init__(self, client: 'Kimi'):
281
+ self._client = client
282
+ self.completions = Completions(client)
283
+
284
+
285
+ class Kimi(OpenAICompatibleProvider):
286
+ """
287
+ OpenAI-compatible Kimi provider.
288
+
289
+ This provider implements the OpenAI API interface for Kimi models.
290
+ It supports the following models:
291
+ - k1.5
292
+ - k2
293
+ - k1.5-thinking
294
+
295
+ Examples:
296
+ >>> from webscout.Provider.OPENAI.kimi import Kimi
297
+ >>> client = Kimi()
298
+ >>> response = client.chat.completions.create(
299
+ ... model="k2",
300
+ ... messages=[{"role": "user", "content": "Hello!"}]
301
+ ... )
302
+ >>> print(response.choices[0].message.content)
303
+ """
304
+
305
+ AVAILABLE_MODELS = ["k1.5", "k2", "k1.5-thinking"]
306
+
307
+ def __init__(
308
+ self,
309
+ api_key: Optional[str] = None,
310
+ base_url: Optional[str] = None,
311
+ timeout: int = 30,
312
+ proxies: Optional[Dict[str, str]] = None,
313
+ browser: str = "chrome",
314
+ web_search: bool = False,
315
+ **kwargs
316
+ ):
317
+ """
318
+ Initialize the Kimi provider.
319
+
320
+ Args:
321
+ api_key: Not used for Kimi (authentication is handled via device registration)
322
+ base_url: Not used for Kimi
323
+ timeout: Request timeout in seconds
324
+ proxies: Proxy configuration
325
+ browser: Browser to impersonate
326
+ web_search: Whether to enable web search
327
+ **kwargs: Additional arguments
328
+ """
329
+ super().__init__(api_key=api_key, base_url=base_url, timeout=timeout, **kwargs)
330
+
331
+ self.timeout = timeout
332
+ self.proxies = proxies or {}
333
+ self.web_search = web_search
334
+
335
+ # Kimi API endpoints
336
+ self.register_endpoint = "https://www.kimi.com/api/device/register"
337
+ self.chat_create_endpoint = "https://www.kimi.com/api/chat"
338
+ self.chat_completion_endpoint = "https://www.kimi.com/api/chat/{chat_id}/completion/stream"
339
+
340
+ # Initialize session
341
+ self.session = Session()
342
+ self.session.proxies = self.proxies
343
+
344
+ # Initialize LitAgent for browser fingerprinting
345
+ try:
346
+ self.agent = LitAgent()
347
+ self.fingerprint = self.agent.generate_fingerprint(browser)
348
+ except:
349
+ self.fingerprint = {
350
+ "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
351
+ "accept_language": "en-US,en;q=0.9",
352
+ "sec_ch_ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
353
+ "platform": "Windows"
354
+ }
355
+
356
+ # Generate device ID
357
+ self.device_id = str(random.randint(1000000000000000, 9999999999999999))
358
+
359
+ # Headers for Kimi API
360
+ self.headers = {
361
+ "Accept": "text/event-stream",
362
+ "Accept-Language": self.fingerprint["accept_language"],
363
+ "Accept-Encoding": "gzip, deflate, br",
364
+ "Cache-Control": "no-cache",
365
+ "Connection": "keep-alive",
366
+ "Content-Type": "application/json",
367
+ "DNT": "1",
368
+ "Origin": "https://www.kimi.com",
369
+ "Pragma": "no-cache",
370
+ "Referer": "https://www.kimi.com/",
371
+ "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
372
+ "Sec-CH-UA-Mobile": "?0",
373
+ "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
374
+ "User-Agent": self.fingerprint["user_agent"],
375
+ "x-msh-device-id": self.device_id,
376
+ "x-msh-platform": "web",
377
+ "x-traffic-id": self.device_id,
378
+ }
379
+
380
+ # Initialize authentication
381
+ self.access_token = None
382
+ self.chat_id = None
383
+
384
+ # Update session headers
385
+ self.session.headers.update(self.headers)
386
+
387
+ # Initialize chat and completions
388
+ self.chat = Chat(self)
389
+ self.completions = Completions(self)
390
+
391
+ @property
392
+ def models(self):
393
+ """Property that returns an object with a .list() method returning available models."""
394
+ class _ModelList:
395
+ def list(inner_self):
396
+ return type(self).AVAILABLE_MODELS
397
+ return _ModelList()
398
+
399
+ def _authenticate(self) -> str:
400
+ """Authenticate with Kimi API and get access token."""
401
+ if self.access_token:
402
+ return self.access_token
403
+
404
+ max_retries = 3
405
+ last_exception = None
406
+
407
+ for attempt in range(max_retries):
408
+ try:
409
+ response = self.session.post(
410
+ self.register_endpoint,
411
+ json={},
412
+ timeout=self.timeout,
413
+ impersonate="chrome110"
414
+ )
415
+ response.raise_for_status()
416
+
417
+ data = response.json()
418
+ if not data.get("access_token"):
419
+ raise exceptions.FailedToGenerateResponseError("No access token received")
420
+
421
+ self.access_token = data["access_token"]
422
+ self.session.headers["Authorization"] = f"Bearer {self.access_token}"
423
+ return self.access_token
424
+
425
+ except CurlError as e:
426
+ last_exception = e
427
+ if attempt < max_retries - 1:
428
+ continue
429
+ raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts (CurlError): {e}")
430
+ except Exception as e:
431
+ last_exception = e
432
+ if attempt < max_retries - 1:
433
+ continue
434
+ raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {e}")
435
+
436
+ def _create_chat(self) -> str:
437
+ """Create a new chat session and return chat ID."""
438
+ if self.chat_id:
439
+ return self.chat_id
440
+
441
+ self._authenticate()
442
+
443
+ try:
444
+ response = self.session.post(
445
+ self.chat_create_endpoint,
446
+ json={
447
+ "name": "Unnamed Chat",
448
+ "born_from": "home",
449
+ "kimiplus_id": "kimi",
450
+ "is_example": False,
451
+ "source": "web",
452
+ "tags": []
453
+ },
454
+ timeout=self.timeout,
455
+ impersonate="chrome110"
456
+ )
457
+ response.raise_for_status()
458
+
459
+ data = response.json()
460
+ self.chat_id = data.get("id")
461
+ if not self.chat_id:
462
+ raise exceptions.FailedToGenerateResponseError("No chat ID received")
463
+
464
+ return self.chat_id
465
+
466
+ except CurlError as e:
467
+ raise exceptions.FailedToGenerateResponseError(f"Chat creation failed (CurlError): {e}")
468
+ except Exception as e:
469
+ raise exceptions.FailedToGenerateResponseError(f"Chat creation failed: {e}")
@@ -4,10 +4,9 @@ import requests
4
4
  import json
5
5
  from typing import List, Dict, Optional, Union, Generator, Any
6
6
 
7
- from webscout.Provider.yep import T
8
7
  from webscout.litagent import LitAgent
9
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
- from .utils import (
8
+ from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
9
+ from webscout.Provider.OPENAI.utils import (
11
10
  ChatCompletion,
12
11
  ChatCompletionChunk,
13
12
  Choice,
@@ -208,10 +207,7 @@ class Netwrck(OpenAICompatibleProvider):
208
207
  "sao10k/l3-euryale-70b",
209
208
  "deepseek/deepseek-chat",
210
209
  "deepseek/deepseek-r1",
211
- "anthropic/claude-sonnet-4-20250514",
212
- "openai/gpt-4.1-mini",
213
210
  "gryphe/mythomax-l2-13b",
214
- "google/gemini-2.5-flash-preview-04-17",
215
211
  "nvidia/llama-3.1-nemotron-70b-instruct",
216
212
  ]
217
213
 
@@ -308,9 +304,9 @@ class Netwrck(OpenAICompatibleProvider):
308
304
  if model.lower() in available_model.lower():
309
305
  return available_model
310
306
 
311
- # Default to Claude if no match
312
- print(f"{BOLD}Warning: Model '{model}' not found, using default model 'anthropic/claude-3-7-sonnet-20250219'{RESET}")
313
- return "anthropic/claude-3-7-sonnet-20250219"
307
+ # Default to DeepSeek if no match
308
+ print(f"{BOLD}Warning: Model '{model}' not found, using default model 'deepseek/deepseek-r1'{RESET}")
309
+ return "deepseek/deepseek-r1"
314
310
 
315
311
  @property
316
312
  def models(self):
@@ -327,9 +323,9 @@ if __name__ == "__main__":
327
323
 
328
324
  # Test a subset of models to avoid excessive API calls
329
325
  test_models = [
330
- "anthropic/claude-3-7-sonnet-20250219",
331
- "openai/gpt-4o-mini",
332
- "deepseek/deepseek-chat"
326
+ "deepseek/deepseek-r1",
327
+ "deepseek/deepseek-chat",
328
+ "gryphe/mythomax-l2-13b"
333
329
  ]
334
330
 
335
331
  for model in test_models: