webscout-7.5-py3-none-any.whl → webscout-7.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (132)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/__init__.py +9 -9
  9. webscout/Extra/autocoder/autocoder_utiles.py +193 -199
  10. webscout/Extra/autocoder/rawdog.py +789 -677
  11. webscout/Extra/gguf.py +682 -428
  12. webscout/Extra/weather.py +178 -156
  13. webscout/Extra/weather_ascii.py +70 -17
  14. webscout/Litlogger/core/logger.py +1 -2
  15. webscout/Litlogger/handlers/file.py +1 -1
  16. webscout/Litlogger/styles/formats.py +0 -2
  17. webscout/Litlogger/utils/detectors.py +0 -1
  18. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  19. webscout/Provider/AISEARCH/ISou.py +1 -22
  20. webscout/Provider/AISEARCH/felo_search.py +0 -1
  21. webscout/Provider/AllenAI.py +28 -30
  22. webscout/Provider/C4ai.py +29 -11
  23. webscout/Provider/ChatGPTClone.py +226 -0
  24. webscout/Provider/ChatGPTGratis.py +24 -56
  25. webscout/Provider/DeepSeek.py +25 -17
  26. webscout/Provider/Deepinfra.py +115 -48
  27. webscout/Provider/Gemini.py +1 -1
  28. webscout/Provider/Glider.py +33 -12
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +23 -7
  31. webscout/Provider/Hunyuan.py +272 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/LambdaChat.py +391 -0
  34. webscout/Provider/Netwrck.py +42 -19
  35. webscout/Provider/OLLAMA.py +256 -32
  36. webscout/Provider/PI.py +4 -2
  37. webscout/Provider/Perplexitylabs.py +26 -6
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +179 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  51. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  52. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  53. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  54. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  55. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  56. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  57. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  58. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  59. webscout/Provider/TTS/__init__.py +2 -2
  60. webscout/Provider/TTS/deepgram.py +12 -39
  61. webscout/Provider/TTS/elevenlabs.py +14 -40
  62. webscout/Provider/TTS/gesserit.py +11 -35
  63. webscout/Provider/TTS/murfai.py +13 -39
  64. webscout/Provider/TTS/parler.py +17 -40
  65. webscout/Provider/TTS/speechma.py +180 -0
  66. webscout/Provider/TTS/streamElements.py +17 -44
  67. webscout/Provider/TextPollinationsAI.py +39 -59
  68. webscout/Provider/Venice.py +25 -8
  69. webscout/Provider/WebSim.py +227 -0
  70. webscout/Provider/WiseCat.py +27 -5
  71. webscout/Provider/Youchat.py +64 -37
  72. webscout/Provider/__init__.py +12 -7
  73. webscout/Provider/akashgpt.py +20 -5
  74. webscout/Provider/flowith.py +33 -7
  75. webscout/Provider/freeaichat.py +32 -45
  76. webscout/Provider/koala.py +20 -5
  77. webscout/Provider/labyrinth.py +239 -0
  78. webscout/Provider/learnfastai.py +28 -15
  79. webscout/Provider/llamatutor.py +1 -1
  80. webscout/Provider/llmchat.py +30 -8
  81. webscout/Provider/multichat.py +65 -9
  82. webscout/Provider/sonus.py +208 -0
  83. webscout/Provider/talkai.py +1 -0
  84. webscout/Provider/turboseek.py +3 -0
  85. webscout/Provider/tutorai.py +2 -0
  86. webscout/Provider/typegpt.py +155 -65
  87. webscout/Provider/uncovr.py +297 -0
  88. webscout/Provider/x0gpt.py +3 -1
  89. webscout/Provider/yep.py +102 -20
  90. webscout/__init__.py +3 -0
  91. webscout/cli.py +53 -40
  92. webscout/conversation.py +1 -10
  93. webscout/litagent/__init__.py +2 -2
  94. webscout/litagent/agent.py +356 -20
  95. webscout/litagent/constants.py +34 -5
  96. webscout/litprinter/__init__.py +0 -3
  97. webscout/models.py +181 -0
  98. webscout/optimizers.py +1 -1
  99. webscout/prompt_manager.py +2 -8
  100. webscout/scout/core/scout.py +1 -4
  101. webscout/scout/core/search_result.py +1 -1
  102. webscout/scout/core/text_utils.py +1 -1
  103. webscout/scout/core.py +2 -5
  104. webscout/scout/element.py +1 -1
  105. webscout/scout/parsers/html_parser.py +1 -1
  106. webscout/scout/utils.py +0 -1
  107. webscout/swiftcli/__init__.py +1 -3
  108. webscout/tempid.py +1 -1
  109. webscout/update_checker.py +1 -3
  110. webscout/version.py +1 -1
  111. webscout/webscout_search_async.py +1 -2
  112. webscout/yep_search.py +297 -297
  113. {webscout-7.5.dist-info → webscout-7.7.dist-info}/LICENSE.md +4 -4
  114. {webscout-7.5.dist-info → webscout-7.7.dist-info}/METADATA +127 -405
  115. {webscout-7.5.dist-info → webscout-7.7.dist-info}/RECORD +118 -117
  116. webscout/Extra/autollama.py +0 -231
  117. webscout/Provider/Amigo.py +0 -274
  118. webscout/Provider/Bing.py +0 -243
  119. webscout/Provider/DiscordRocks.py +0 -253
  120. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  121. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  122. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  123. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  124. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  125. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  126. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  127. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  128. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  129. webscout/Provider/TTS/voicepod.py +0 -117
  130. {webscout-7.5.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  131. {webscout-7.5.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  132. {webscout-7.5.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Provider/LambdaChat.py (new file)
@@ -0,0 +1,391 @@
+ import requests
+ import json
+ import time
+ import random
+ import re
+ import uuid
+ from typing import Any, Dict, List, Optional, Union, Generator
+
+ from webscout.AIutel import Conversation
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class LambdaChat(Provider):
+     """
+     A class to interact with the Lambda Chat API.
+     Supports streaming responses.
+     """
+     url = "https://lambda.chat"
+
+     AVAILABLE_MODELS = [
+         "deepseek-llama3.3-70b",
+         "deepseek-r1",
+         "hermes-3-llama-3.1-405b-fp8",
+         "llama3.1-nemotron-70b-instruct",
+         "lfm-40b",
+         "llama3.3-70b-instruct-fp8"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 60,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         model: str = "deepseek-llama3.3-70b",
+         assistantId: str = None,
+         system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+     ):
+         """Initialize the LambdaChat client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.model = model
+         self.session = requests.Session()
+         self.session.proxies.update(proxies)
+         self.assistantId = assistantId
+         self.system_prompt = system_prompt
+
+         # Set up headers for all requests
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": LitAgent().random(),
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Origin": self.url,
+             "Referer": f"{self.url}/",
+             "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
+             "Sec-Ch-Ua-Mobile": "?0",
+             "Sec-Ch-Ua-Platform": "\"Windows\"",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "DNT": "1",
+             "Priority": "u=1, i"
+         }
+
+         # Provider settings
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+
+         # Initialize conversation history
+         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+
+         # Store conversation data for different models
+         self._conversation_data = {}
+
+     def create_conversation(self, model: str):
+         """Create a new conversation with the specified model."""
+         url = f"{self.url}/conversation"
+         payload = {
+             "model": model
+         }
+
+         # Update referer for this specific request
+         headers = self.headers.copy()
+         headers["Referer"] = f"{self.url}/models/{model}"
+
+         try:
+             response = self.session.post(url, json=payload, headers=headers)
+
+             if response.status_code == 401:
+                 raise exceptions.AuthenticationError("Authentication failed.")
+
+             # Handle other error codes
+             if response.status_code != 200:
+                 return None
+
+             data = response.json()
+             conversation_id = data.get("conversationId")
+
+             # Store conversation data
+             if model not in self._conversation_data:
+                 self._conversation_data[model] = {
+                     "conversationId": conversation_id,
+                     "messageId": str(uuid.uuid4())  # Initial message ID
+                 }
+
+             return conversation_id
+         except requests.exceptions.RequestException:
+             return None
+
+     def fetch_message_id(self, conversation_id: str) -> str:
+         """Fetch the latest message ID for a conversation."""
+         try:
+             url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
+             response = self.session.get(url, headers=self.headers)
+             response.raise_for_status()
+
+             # Parse the JSON data from the response
+             json_data = None
+             for line in response.text.split('\n'):
+                 if line.strip():
+                     try:
+                         parsed = json.loads(line)
+                         if isinstance(parsed, dict) and "nodes" in parsed:
+                             json_data = parsed
+                             break
+                     except json.JSONDecodeError:
+                         continue
+
+             if not json_data:
+                 # Fall back to a UUID if we can't parse the response
+                 return str(uuid.uuid4())
+
+             # Extract message ID using the same pattern as in the example
+             if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
+                 return str(uuid.uuid4())
+
+             data = json_data["nodes"][1]["data"]
+             keys = data[data[0]["messages"]]
+             message_keys = data[keys[-1]]
+             message_id = data[message_keys["id"]]
+
+             return message_id
+
+         except Exception:
+             # Fall back to a UUID if there's an error
+             return str(uuid.uuid4())
+
+     def generate_boundary(self):
+         """Generate a random boundary for multipart/form-data requests"""
+         boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+         boundary = "----WebKitFormBoundary"
+         boundary += "".join(random.choice(boundary_chars) for _ in range(16))
+         return boundary
+
+     def process_response(self, response, prompt: str):
+         """Process streaming response and extract content."""
+         full_text = ""
+         sources = None
+         reasoning_text = ""
+         has_reasoning = False
+
+         for line in response.iter_lines(decode_unicode=True):
+             if not line:
+                 continue
+
+             try:
+                 # Parse each line as JSON
+                 data = json.loads(line)
+
+                 # Handle different response types
+                 if "type" not in data:
+                     continue
+
+                 if data["type"] == "stream" and "token" in data:
+                     token = data["token"].replace("\u0000", "")
+                     full_text += token
+                     resp = {"text": token}
+                     yield resp
+                 elif data["type"] == "finalAnswer":
+                     final_text = data.get("text", "")
+                     if final_text and not full_text:
+                         full_text = final_text
+                         resp = {"text": final_text}
+                         yield resp
+                 elif data["type"] == "webSearch" and "sources" in data:
+                     sources = data["sources"]
+                 elif data["type"] == "reasoning":
+                     has_reasoning = True
+                     if data.get("subtype") == "stream" and "token" in data:
+                         reasoning_text += data["token"]
+
+                 # If we have reasoning, prepend it to the next text output
+                 if reasoning_text and not full_text:
+                     resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
+                     yield resp
+
+             except json.JSONDecodeError:
+                 continue
+
+         # Update conversation history only for saving to file if needed
+         if full_text and self.conversation.file:
+             if has_reasoning:
+                 full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
+                 self.last_response = {"text": full_text_with_reasoning}
+                 self.conversation.update_chat_history(prompt, full_text_with_reasoning)
+             else:
+                 self.last_response = {"text": full_text}
+                 self.conversation.update_chat_history(prompt, full_text)
+
+         return full_text
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """Send a message to the Lambda Chat API"""
+         model = self.model
+
+         # Check if we have a conversation for this model
+         if model not in self._conversation_data:
+             conversation_id = self.create_conversation(model)
+             if not conversation_id:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
+         else:
+             conversation_id = self._conversation_data[model]["conversationId"]
+             # Refresh message ID
+             self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
+
+         url = f"{self.url}/conversation/{conversation_id}"
+         message_id = self._conversation_data[model]["messageId"]
+
+         # Data to send
+         request_data = {
+             "inputs": prompt,
+             "id": message_id,
+             "is_retry": False,
+             "is_continue": False,
+             "web_search": web_search,
+             "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+         }
+
+         # Update headers for this specific request
+         headers = self.headers.copy()
+         headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
+
+         # Create multipart form data
+         boundary = self.generate_boundary()
+         multipart_headers = headers.copy()
+         multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
+
+         # Serialize the data to JSON
+         data_json = json.dumps(request_data, separators=(',', ':'))
+
+         # Create the multipart form data body
+         body = f"--{boundary}\r\n"
+         body += f'Content-Disposition: form-data; name="data"\r\n'
+         body += f"Content-Type: application/json\r\n\r\n"
+         body += f"{data_json}\r\n"
+         body += f"--{boundary}--\r\n"
+
+         multipart_headers["Content-Length"] = str(len(body))
+
+         def for_stream():
+             try:
+                 # Try with multipart/form-data first
+                 response = None
+                 try:
+                     response = self.session.post(
+                         url,
+                         data=body,
+                         headers=multipart_headers,
+                         stream=True,
+                         timeout=self.timeout
+                     )
+                 except requests.exceptions.RequestException:
+                     pass
+
+                 # If multipart fails or returns error, try with regular JSON
+                 if not response or response.status_code != 200:
+                     response = self.session.post(
+                         url,
+                         json=request_data,
+                         headers=headers,
+                         stream=True,
+                         timeout=self.timeout
+                     )
+
+                 # If both methods fail, raise exception
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+
+                 # Process the streaming response
+                 yield from self.process_response(response, prompt)
+
+             except Exception as e:
+                 if isinstance(e, requests.exceptions.RequestException):
+                     if hasattr(e, 'response') and e.response is not None:
+                         status_code = e.response.status_code
+                         if status_code == 401:
+                             raise exceptions.AuthenticationError("Authentication failed.")
+
+                 # Try another model if current one fails
+                 if len(self.AVAILABLE_MODELS) > 1:
+                     current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
+                     next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
+                     self.model = self.AVAILABLE_MODELS[next_model_index]
+
+                     # Create new conversation with the alternate model
+                     conversation_id = self.create_conversation(self.model)
+                     if conversation_id:
+                         # Try again with the new model
+                         yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
+                                             conversationally=conversationally, web_search=web_search)
+                         return
+
+                 # If we get here, all models failed
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             response_text = ""
+             for response in for_stream():
+                 if "text" in response:
+                     response_text += response["text"]
+             self.last_response = {"text": response_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False
+     ) -> Union[str, Generator]:
+         """Generate a response to a prompt"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Extract message text from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in LambdaChat.AVAILABLE_MODELS:
+         try:
+             test_ai = LambdaChat(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word")
+             response_text = response
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Netwrck.py
@@ -16,22 +16,22 @@ class Netwrck(Provider):
      """
      greeting = """Hello! I'm a helpful assistant. How can I help you today?"""

-     AVAILABLE_MODELS = {
-         "lumimaid": "neversleep/llama-3-lumimaid-8b:extended",
-         "grok": "x-ai/grok-2",
-         "claude": "anthropic/claude-3-7-sonnet-20250219",
-         "euryale": "sao10k/l3-euryale-70b",
-         "gpt4mini": "openai/gpt-4o-mini",
-         "mythomax": "gryphe/mythomax-l2-13b",
-         "gemini": "google/gemini-pro-1.5",
-         "nemotron": "nvidia/llama-3.1-nemotron-70b-instruct",
-         "deepseek-r1": "deepseek/deepseek-r1",
-         "deepseek": "deepseek/deepseek-chat",
-     }
+     AVAILABLE_MODELS = [
+         "neversleep/llama-3-lumimaid-8b:extended",
+         "x-ai/grok-2",
+         "anthropic/claude-3-7-sonnet-20250219",
+         "sao10k/l3-euryale-70b",
+         "openai/gpt-4o-mini",
+         "gryphe/mythomax-l2-13b",
+         "google/gemini-pro-1.5",
+         "nvidia/llama-3.1-nemotron-70b-instruct",
+         "deepseek-r1",
+         "deepseek",
+     ]

      def __init__(
          self,
-         model: str = "claude",
+         model: str = "anthropic/claude-3-7-sonnet-20250219",
          is_conversation: bool = True,
          max_tokens: int = 4096,
          timeout: int = 30,
@@ -47,10 +47,10 @@ class Netwrck(Provider):
      ):
          """Initializes the Netwrck API client."""
          if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}")
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

          self.model = model
-         self.model_name = self.AVAILABLE_MODELS[model]
+         self.model_name = model  # Use the model name directly since it's already in the correct format
          self.system_prompt = system_prompt
          self.session = requests.Session()
          self.is_conversation = is_conversation
@@ -198,7 +198,30 @@ class Netwrck(Provider):
          return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')

  if __name__ == "__main__":
-     from rich import print
-
-     netwrck = Netwrck(model="claude")
-     print(netwrck.chat("Hello! How are you?"))
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(Netwrck.AVAILABLE_MODELS)
+
+     for model in Netwrck.AVAILABLE_MODELS:
+         try:
+             test_ai = Netwrck(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")