webscout 8.0-py3-none-any.whl → 8.2-py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as published.

Potentially problematic release.

Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
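
The headline addition in 8.2 is the new webscout/Provider/OPENAI package: OpenAI-compatible wrapper clients (c4ai, chatgpt, deepinfra, exachat, and others) that all expose the familiar client.chat.completions.create(...) interface. The hunk below reproduces one of them in full. For orientation, this is the usage pattern documented in that file's own docstring; the import path is an assumption inferred from the package layout listed above:

    # Hypothetical usage sketch; assumes ExaChat is re-exported from
    # webscout.Provider.OPENAI (see the __init__.py change in file 28).
    from webscout.Provider.OPENAI import ExaChat

    client = ExaChat(timeout=60)
    response = client.chat.completions.create(
        model="exaanswer",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)
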
webscout/Provider/OPENAI/exachat.py (new file)
@@ -0,0 +1,433 @@
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ # Model configurations
+ MODEL_CONFIGS = {
+     "exaanswer": {
+         "endpoint": "https://ayle.chat/api/exaanswer",
+         "models": ["exaanswer"],
+     },
+     "gemini": {
+         "endpoint": "https://ayle.chat/api/gemini",
+         "models": [
+             "gemini-2.0-flash",
+             "gemini-2.0-flash-exp-image-generation",
+             "gemini-2.0-flash-thinking-exp-01-21",
+             "gemini-2.5-pro-exp-03-25",
+             "gemini-2.0-pro-exp-02-05",
+         ],
+     },
+     "openrouter": {
+         "endpoint": "https://ayle.chat/api/openrouter",
+         "models": [
+             "mistralai/mistral-small-3.1-24b-instruct:free",
+             "deepseek/deepseek-r1:free",
+             "deepseek/deepseek-chat-v3-0324:free",
+             "google/gemma-3-27b-it:free",
+             "meta-llama/llama-4-maverick:free",
+         ],
+     },
+     "groq": {
+         "endpoint": "https://ayle.chat/api/groq",
+         "models": [
+             "deepseek-r1-distill-llama-70b",
+             "deepseek-r1-distill-qwen-32b",
+             "gemma2-9b-it",
+             "llama-3.1-8b-instant",
+             "llama-3.2-1b-preview",
+             "llama-3.2-3b-preview",
+             "llama-3.2-90b-vision-preview",
+             "llama-3.3-70b-specdec",
+             "llama-3.3-70b-versatile",
+             "llama3-70b-8192",
+             "llama3-8b-8192",
+             "qwen-2.5-32b",
+             "qwen-2.5-coder-32b",
+             "qwen-qwq-32b",
+             "meta-llama/llama-4-scout-17b-16e-instruct"
+         ],
+     },
+     "cerebras": {
+         "endpoint": "https://ayle.chat/api/cerebras",
+         "models": [
+             "llama3.1-8b",
+             "llama-3.3-70b"
+         ],
+     },
+     "xai": {
+         "endpoint": "https://ayle.chat/api/xai",
+         "models": [
+             "grok-3-mini-beta"
+         ],
+     },
+ }
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'ExaChat'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility.
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+         # Determine the provider based on the model
+         provider = self._client._get_provider_from_model(model)
+
+         # Build the appropriate payload based on the provider
+         if provider == "exaanswer":
+             payload = {
+                 "query": question,
+                 "messages": []
+             }
+         elif provider in ["gemini", "cerebras"]:
+             payload = {
+                 "query": question,
+                 "model": model,
+                 "messages": []
+             }
+         else:  # openrouter, groq, or xai
+             payload = {
+                 "query": question + "\n",  # Add newline for openrouter, groq, and xai models
+                 "model": model,
+                 "messages": []
+             }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, provider, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, provider, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             endpoint = self._client._get_endpoint(provider)
+             response = self._client.session.post(
+                 endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             completion_tokens = 0
+             streaming_text = ""
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     data = json.loads(line.decode('utf-8'))
+                     if 'choices' in data and len(data['choices']) > 0:
+                         content = data['choices'][0].get('delta', {}).get('content', '')
+                         if content:
+                             streaming_text += content
+                             completion_tokens += len(content) // 4  # Rough estimate
+
+                             # Create a delta object for this chunk
+                             delta = ChoiceDelta(content=content)
+                             choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model,
+                             )
+
+                             yield chunk
+                 except json.JSONDecodeError:
+                     continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+
+             yield chunk
+
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during ExaChat stream request: {e}{RESET}")
+             raise IOError(f"ExaChat request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             endpoint = self._client._get_endpoint(provider)
+             response = self._client.session.post(
+                 endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         data = json.loads(line.decode('utf-8'))
+                         if 'choices' in data and len(data['choices']) > 0:
+                             content = data['choices'][0].get('delta', {}).get('content', '')
+                             if content:
+                                 full_response += content
+                     except json.JSONDecodeError:
+                         continue
+
+             # Create usage statistics (estimated)
+             prompt_tokens = len(payload["query"]) // 4
+             completion_tokens = len(full_response) // 4
+             total_tokens = prompt_tokens + completion_tokens
+
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during ExaChat non-stream request: {e}{RESET}")
+             raise IOError(f"ExaChat request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'ExaChat'):
+         self.completions = Completions(client)
+
+ class ExaChat(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for ExaChat API.
+
+     Usage:
+         client = ExaChat()
+         response = client.chat.completions.create(
+             model="exaanswer",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         # ExaAnswer Models
+         "exaanswer",
+
+         # XAI Models
+         "grok-3-mini-beta",
+
+         # Gemini Models
+         "gemini-2.0-flash",
+         "gemini-2.0-flash-exp-image-generation",
+         "gemini-2.0-flash-thinking-exp-01-21",
+         "gemini-2.5-pro-exp-03-25",
+         "gemini-2.0-pro-exp-02-05",
+
+         # OpenRouter Models
+         "mistralai/mistral-small-3.1-24b-instruct:free",
+         "deepseek/deepseek-r1:free",
+         "deepseek/deepseek-chat-v3-0324:free",
+         "google/gemma-3-27b-it:free",
+         "meta-llama/llama-4-maverick:free",
+
+         # Groq Models
+         "deepseek-r1-distill-llama-70b",
+         "deepseek-r1-distill-qwen-32b",
+         "gemma2-9b-it",
+         "llama-3.1-8b-instant",
+         "llama-3.2-1b-preview",
+         "llama-3.2-3b-preview",
+         "llama-3.2-90b-vision-preview",
+         "llama-3.3-70b-specdec",
+         "llama-3.3-70b-versatile",
+         "llama3-70b-8192",
+         "llama3-8b-8192",
+         "qwen-2.5-32b",
+         "qwen-2.5-coder-32b",
+         "qwen-qwq-32b",
+         "meta-llama/llama-4-scout-17b-16e-instruct",
+
+         # Cerebras Models
+         "llama3.1-8b",
+         "llama-3.3-70b",
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         temperature: float = 0.5,
+         top_p: float = 1.0
+     ):
+         """
+         Initialize the ExaChat client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             temperature: Temperature for response generation.
+             top_p: Top-p sampling parameter.
+         """
+         self.timeout = timeout
+         self.temperature = temperature
+         self.top_p = top_p
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://ayle.chat/",
+             "referer": "https://ayle.chat//",
+             "user-agent": agent.random(),
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.cookies.update({"session": uuid.uuid4().hex})
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def _get_endpoint(self, provider: str) -> str:
+         """Get the API endpoint for the specified provider."""
+         return MODEL_CONFIGS[provider]["endpoint"]
+
+     def _get_provider_from_model(self, model: str) -> str:
+         """Determine the provider based on the model name."""
+         for provider, config in MODEL_CONFIGS.items():
+             if model in config["models"]:
+                 return provider
+
+         # If model not found, use a default model
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+         return "exaanswer"
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is in the correct format.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to exaanswer if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+         return "exaanswer"
+
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test a subset of models to avoid excessive API calls
+     test_models = [
+         "exaanswer",
+         "gemini-2.0-flash",
+         "deepseek/deepseek-r1:free",
+         "llama-3.1-8b-instant",
+         "llama3.1-8b"
+     ]
+
+     for model in test_models:
+         try:
+             client = ExaChat(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")