webscout 8.0-py3-none-any.whl → 8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (45)
  1. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  2. webscout/Provider/AISEARCH/ISou.py +1 -1
  3. webscout/Provider/AISEARCH/__init__.py +2 -1
  4. webscout/Provider/AISEARCH/felo_search.py +1 -1
  5. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +1 -1
  7. webscout/Provider/AISEARCH/iask_search.py +436 -0
  8. webscout/Provider/AISEARCH/scira_search.py +1 -1
  9. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  10. webscout/Provider/ExaAI.py +1 -1
  11. webscout/Provider/Jadve.py +2 -2
  12. webscout/Provider/OPENAI/__init__.py +17 -0
  13. webscout/Provider/OPENAI/base.py +46 -0
  14. webscout/Provider/OPENAI/c4ai.py +347 -0
  15. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  16. webscout/Provider/OPENAI/deepinfra.py +284 -0
  17. webscout/Provider/OPENAI/exaai.py +419 -0
  18. webscout/Provider/OPENAI/exachat.py +421 -0
  19. webscout/Provider/OPENAI/freeaichat.py +355 -0
  20. webscout/Provider/OPENAI/glider.py +314 -0
  21. webscout/Provider/OPENAI/heckai.py +337 -0
  22. webscout/Provider/OPENAI/llmchatco.py +325 -0
  23. webscout/Provider/OPENAI/netwrck.py +348 -0
  24. webscout/Provider/OPENAI/scirachat.py +459 -0
  25. webscout/Provider/OPENAI/sonus.py +294 -0
  26. webscout/Provider/OPENAI/typegpt.py +361 -0
  27. webscout/Provider/OPENAI/utils.py +211 -0
  28. webscout/Provider/OPENAI/venice.py +428 -0
  29. webscout/Provider/OPENAI/wisecat.py +381 -0
  30. webscout/Provider/OPENAI/x0gpt.py +389 -0
  31. webscout/Provider/OPENAI/yep.py +329 -0
  32. webscout/Provider/Venice.py +1 -1
  33. webscout/Provider/__init__.py +6 -6
  34. webscout/Provider/scira_chat.py +13 -10
  35. webscout/Provider/typegpt.py +3 -184
  36. webscout/prompt_manager.py +2 -1
  37. webscout/version.py +1 -1
  38. webscout-8.1.dist-info/METADATA +683 -0
  39. {webscout-8.0.dist-info → webscout-8.1.dist-info}/RECORD +43 -23
  40. webscout/Provider/flowith.py +0 -207
  41. webscout-8.0.dist-info/METADATA +0 -995
  42. {webscout-8.0.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  43. {webscout-8.0.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  44. {webscout-8.0.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.0.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/netwrck.py (new file, item 23 above)
@@ -0,0 +1,348 @@
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.Provider.yep import T
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt,
+     get_system_prompt
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'Netwrck'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+
+         # Prepare the payload for the Netwrck API
+         payload = {
+             "query": formatted_prompt,
+             "context": get_system_prompt(messages),
+             "examples": [],
+             "model_name": self._client.convert_model_name(model),
+             "greeting": self._client.greeting
+         }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 "https://netwrck.com/api/chatpred_or",
+                 json=payload,
+                 headers=self._client.headers,
+                 timeout=self._client.timeout,
+                 stream=True
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             completion_tokens = 0
+             streaming_text = ""
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     decoded_line = line.decode('utf-8').strip('"')
+                     if decoded_line:
+                         # Format the decoded line using the client's formatter
+                         formatted_content = self._client.format_text(decoded_line)
+                         streaming_text += formatted_content
+                         completion_tokens += len(formatted_content) // 4  # Rough estimate
+
+                         # Create a delta object for this chunk
+                         delta = ChoiceDelta(content=formatted_content)
+                         choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                         chunk = ChatCompletionChunk(
+                             id=request_id,
+                             choices=[choice],
+                             created=created_time,
+                             model=model,
+                         )
+
+                         yield chunk
+                 except Exception:
+                     continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+
+             yield chunk
+
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during Netwrck stream request: {e}{RESET}")
+             raise IOError(f"Netwrck request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             response = self._client.session.post(
+                 "https://netwrck.com/api/chatpred_or",
+                 json=payload,
+                 headers=self._client.headers,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             # Process the response
+             raw_response = response.text.strip('"')
+             # Format the full response using the client's formatter
+             full_response = self._client.format_text(raw_response)
+
+             # Create usage statistics (estimated)
+             prompt_tokens = len(payload["query"]) // 4
+             completion_tokens = len(full_response) // 4
+             total_tokens = prompt_tokens + completion_tokens
+
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during Netwrck non-stream request: {e}{RESET}")
+             raise IOError(f"Netwrck request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'Netwrck'):
+         self.completions = Completions(client)
+
+ class Netwrck(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for the Netwrck API.
+
+     Usage:
+         client = Netwrck()
+         response = client.chat.completions.create(
+             model="anthropic/claude-3-7-sonnet-20250219",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "neversleep/llama-3-lumimaid-8b:extended",
+         "x-ai/grok-2",
+         "anthropic/claude-3-7-sonnet-20250219",
+         "sao10k/l3-euryale-70b",
+         "openai/gpt-4o-mini",
+         "gryphe/mythomax-l2-13b",
+         "google/gemini-pro-1.5",
+         "nvidia/llama-3.1-nemotron-70b-instruct",
+         "deepseek/deepseek-r1",
+         "deepseek/deepseek-chat"
+     ]
+
+     # Default greeting used by Netwrck
+     greeting = """Hello! I'm a helpful assistant. How can I help you today?"""
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         temperature: float = 0.7,
+         top_p: float = 0.8,
+         system_prompt: str = "You are a helpful assistant."
+     ):
+         """
+         Initialize the Netwrck client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             temperature: Temperature for response generation.
+             top_p: Top-p sampling parameter.
+             system_prompt: System prompt to use for the conversation.
+         """
+         self.timeout = timeout
+         self.temperature = temperature
+         self.top_p = top_p
+         self.system_prompt = system_prompt
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+
+         self.headers = {
+             'authority': 'netwrck.com',
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9',
+             'content-type': 'application/json',
+             'origin': 'https://netwrck.com',
+             'referer': 'https://netwrck.com/',
+             'user-agent': agent.random()
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def format_text(self, text: str) -> str:
+         """
+         Format text by replacing escaped newlines with actual newlines.
+
+         Args:
+             text: Text to format
+
+         Returns:
+             Formatted text
+         """
+         # Use a more comprehensive approach to handle all escape sequences
+         try:
+             # First handle double backslashes to avoid issues
+             text = text.replace('\\\\', '\\')
+
+             # Handle common escape sequences
+             text = text.replace('\\n', '\n')
+             text = text.replace('\\r', '\r')
+             text = text.replace('\\t', '\t')
+             text = text.replace('\\"', '"')
+             text = text.replace("\\'", "'")
+
+             # Handle any remaining escape sequences using JSON decoding
+             # This is a fallback in case there are other escape sequences
+             try:
+                 # Add quotes to make it a valid JSON string
+                 json_str = f'"{text}"'
+                 # Use json module to decode all escape sequences
+                 decoded = json.loads(json_str)
+                 return decoded
+             except json.JSONDecodeError:
+                 # If JSON decoding fails, return the text with the replacements we've already done
+                 return text
+         except Exception as e:
+             # If any error occurs, return the original text
+             print(f"Warning: Error formatting text: {e}")
+             return text
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is in the correct format.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to Claude if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'anthropic/claude-3-7-sonnet-20250219'{RESET}")
+         return "anthropic/claude-3-7-sonnet-20250219"
+
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test a subset of models to avoid excessive API calls
+     test_models = [
+         "anthropic/claude-3-7-sonnet-20250219",
+         "openai/gpt-4o-mini",
+         "deepseek/deepseek-chat"
+     ]
+
+     for model in test_models:
+         try:
+             client = Netwrck(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")