webscout-8.0-py3-none-any.whl → webscout-8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/OPENAI/sonus.py
@@ -0,0 +1,294 @@
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'SonusAI'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,  # Not used by SonusAI but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None,  # Not used by SonusAI but kept for compatibility
+         top_p: Optional[float] = None,  # Not used by SonusAI but kept for compatibility
+         **kwargs: Any  # Only 'reasoning' is read from kwargs; everything else is ignored
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         # SonusAI works better with a properly formatted conversation
+         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+         # Extract reasoning parameter if provided
+         reasoning = kwargs.get('reasoning', False)
+
+         # Prepare the multipart form data for the SonusAI API
+         files = {
+             'message': (None, question),
+             'history': (None, '[]'),  # assumed empty-history value; the released code passed (None), which is not a tuple and requests silently drops the field
+             'reasoning': (None, str(reasoning).lower()),
+             'model': (None, self._client.convert_model_name(model))
+         }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, files)
+         else:
+             return self._create_non_stream(request_id, created_time, model, files)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = requests.post(
+                 self._client.url,
+                 files=files,
+                 headers=self._client.headers,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             completion_tokens = 0
+             streaming_text = ""
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     # Decode the line and remove the 'data: ' prefix if present
+                     line_text = line.decode('utf-8')
+                     if line_text.startswith('data: '):
+                         line_text = line_text[6:]
+
+                     data = json.loads(line_text)
+                     if "content" in data:
+                         content = data["content"]
+                         streaming_text += content
+                         completion_tokens += len(content) // 4  # Rough estimate
+
+                         # Create a delta object for this chunk
+                         delta = ChoiceDelta(content=content)
+                         choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                         chunk = ChatCompletionChunk(
+                             id=request_id,
+                             choices=[choice],
+                             created=created_time,
+                             model=model,
+                         )
+
+                         yield chunk
+                 except (json.JSONDecodeError, UnicodeDecodeError):
+                     continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+
+             yield chunk
+
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during SonusAI stream request: {e}{RESET}")
+             raise IOError(f"SonusAI request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             response = requests.post(
+                 self._client.url,
+                 files=files,
+                 headers=self._client.headers,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         line_text = line.decode('utf-8')
+                         if line_text.startswith('data: '):
+                             line_text = line_text[6:]
+                         data = json.loads(line_text)
+                         if "content" in data:
+                             full_response += data["content"]
+                     except (json.JSONDecodeError, UnicodeDecodeError):
+                         continue
+
+             # Create usage statistics (estimated at roughly 4 characters per token)
+             prompt_tokens = len(files['message'][1]) // 4
+             completion_tokens = len(full_response) // 4
+             total_tokens = prompt_tokens + completion_tokens
+
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during SonusAI non-stream request: {e}{RESET}")
+             raise IOError(f"SonusAI request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'SonusAI'):
+         self.completions = Completions(client)
+
+ class SonusAI(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for the Sonus AI API.
+
+     Usage:
+         client = SonusAI()
+         response = client.chat.completions.create(
+             model="pro",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "pro",
+         "air",
+         "mini"
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30
+     ):
+         """
+         Initialize the SonusAI client.
+
+         Args:
+             timeout: Request timeout in seconds.
+         """
+         self.timeout = timeout
+         self.url = "https://chat.sonus.ai/chat.php"
+
+         # Headers for the request
+         agent = LitAgent()
+         self.headers = {
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'Origin': 'https://chat.sonus.ai',
+             'Referer': 'https://chat.sonus.ai/',
+             'User-Agent': agent.random()
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is one of AVAILABLE_MODELS.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model by substring
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to 'pro' if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'pro'{RESET}")
+         return "pro"
+
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in SonusAI.AVAILABLE_MODELS:
+         try:
+             client = SonusAI(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = "✓"
+                 # Truncate the response if it is too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")