webscout: 8.0-py3-none-any.whl → 8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (45)
  1. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  2. webscout/Provider/AISEARCH/ISou.py +1 -1
  3. webscout/Provider/AISEARCH/__init__.py +2 -1
  4. webscout/Provider/AISEARCH/felo_search.py +1 -1
  5. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +1 -1
  7. webscout/Provider/AISEARCH/iask_search.py +436 -0
  8. webscout/Provider/AISEARCH/scira_search.py +1 -1
  9. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  10. webscout/Provider/ExaAI.py +1 -1
  11. webscout/Provider/Jadve.py +2 -2
  12. webscout/Provider/OPENAI/__init__.py +17 -0
  13. webscout/Provider/OPENAI/base.py +46 -0
  14. webscout/Provider/OPENAI/c4ai.py +347 -0
  15. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  16. webscout/Provider/OPENAI/deepinfra.py +284 -0
  17. webscout/Provider/OPENAI/exaai.py +419 -0
  18. webscout/Provider/OPENAI/exachat.py +421 -0
  19. webscout/Provider/OPENAI/freeaichat.py +355 -0
  20. webscout/Provider/OPENAI/glider.py +314 -0
  21. webscout/Provider/OPENAI/heckai.py +337 -0
  22. webscout/Provider/OPENAI/llmchatco.py +325 -0
  23. webscout/Provider/OPENAI/netwrck.py +348 -0
  24. webscout/Provider/OPENAI/scirachat.py +459 -0
  25. webscout/Provider/OPENAI/sonus.py +294 -0
  26. webscout/Provider/OPENAI/typegpt.py +361 -0
  27. webscout/Provider/OPENAI/utils.py +211 -0
  28. webscout/Provider/OPENAI/venice.py +428 -0
  29. webscout/Provider/OPENAI/wisecat.py +381 -0
  30. webscout/Provider/OPENAI/x0gpt.py +389 -0
  31. webscout/Provider/OPENAI/yep.py +329 -0
  32. webscout/Provider/Venice.py +1 -1
  33. webscout/Provider/__init__.py +6 -6
  34. webscout/Provider/scira_chat.py +13 -10
  35. webscout/Provider/typegpt.py +3 -184
  36. webscout/prompt_manager.py +2 -1
  37. webscout/version.py +1 -1
  38. webscout-8.1.dist-info/METADATA +683 -0
  39. {webscout-8.0.dist-info → webscout-8.1.dist-info}/RECORD +43 -23
  40. webscout/Provider/flowith.py +0 -207
  41. webscout-8.0.dist-info/METADATA +0 -995
  42. {webscout-8.0.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  43. {webscout-8.0.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  44. {webscout-8.0.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.0.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/exachat.py (new file)
@@ -0,0 +1,421 @@
+import time
+import uuid
+import requests
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.litagent import LitAgent
+from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from .utils import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    Choice,
+    ChatCompletionMessage,
+    ChoiceDelta,
+    CompletionUsage,
+    format_prompt
+)
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+# Model configurations
+MODEL_CONFIGS = {
+    "exaanswer": {
+        "endpoint": "https://exa-chat.vercel.app/api/exaanswer",
+        "models": ["exaanswer"],
+    },
+    "gemini": {
+        "endpoint": "https://exa-chat.vercel.app/api/gemini",
+        "models": [
+            "gemini-2.0-flash",
+            "gemini-2.0-flash-exp-image-generation",
+            "gemini-2.0-flash-thinking-exp-01-21",
+            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.0-pro-exp-02-05",
+        ],
+    },
+    "openrouter": {
+        "endpoint": "https://exa-chat.vercel.app/api/openrouter",
+        "models": [
+            "mistralai/mistral-small-3.1-24b-instruct:free",
+            "deepseek/deepseek-r1:free",
+            "deepseek/deepseek-chat-v3-0324:free",
+            "google/gemma-3-27b-it:free",
+            "meta-llama/llama-4-maverick:free",
+        ],
+    },
+    "groq": {
+        "endpoint": "https://exa-chat.vercel.app/api/groq",
+        "models": [
+            "deepseek-r1-distill-llama-70b",
+            "deepseek-r1-distill-qwen-32b",
+            "gemma2-9b-it",
+            "llama-3.1-8b-instant",
+            "llama-3.2-1b-preview",
+            "llama-3.2-3b-preview",
+            "llama-3.2-90b-vision-preview",
+            "llama-3.3-70b-specdec",
+            "llama-3.3-70b-versatile",
+            "llama3-70b-8192",
+            "llama3-8b-8192",
+            "qwen-2.5-32b",
+            "qwen-2.5-coder-32b",
+            "qwen-qwq-32b",
+            "meta-llama/llama-4-scout-17b-16e-instruct"
+        ],
+    },
+    "cerebras": {
+        "endpoint": "https://exa-chat.vercel.app/api/cerebras",
+        "models": [
+            "llama3.1-8b",
+            "llama-3.3-70b"
+        ],
+    },
+}
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'ExaChat'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Format the messages using the format_prompt utility
+        # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+        question = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+        # Determine the provider based on the model
+        provider = self._client._get_provider_from_model(model)
+
+        # Build the appropriate payload based on the provider
+        if provider == "exaanswer":
+            payload = {
+                "query": question,
+                "messages": []
+            }
+        elif provider in ["gemini", "cerebras"]:
+            payload = {
+                "query": question,
+                "model": model,
+                "messages": []
+            }
+        else:  # openrouter or groq
+            payload = {
+                "query": question + "\n",  # Add newline for openrouter and groq models
+                "model": model,
+                "messages": []
+            }
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, provider, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, provider, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            endpoint = self._client._get_endpoint(provider)
+            response = self._client.session.post(
+                endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+            response.raise_for_status()
+
+            # Track token usage across chunks
+            completion_tokens = 0
+            streaming_text = ""
+
+            for line in response.iter_lines():
+                if not line:
+                    continue
+
+                try:
+                    data = json.loads(line.decode('utf-8'))
+                    if 'choices' in data and len(data['choices']) > 0:
+                        content = data['choices'][0].get('delta', {}).get('content', '')
+                        if content:
+                            streaming_text += content
+                            completion_tokens += len(content) // 4  # Rough estimate
+
+                            # Create a delta object for this chunk
+                            delta = ChoiceDelta(content=content)
+                            choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                            )
+
+                            yield chunk
+                except json.JSONDecodeError:
+                    continue
+
+            # Final chunk with finish_reason
+            delta = ChoiceDelta(content=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+            )
+
+            yield chunk
+
+        except requests.exceptions.RequestException as e:
+            print(f"{RED}Error during ExaChat stream request: {e}{RESET}")
+            raise IOError(f"ExaChat request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            endpoint = self._client._get_endpoint(provider)
+            response = self._client.session.post(
+                endpoint,
+                headers=self._client.headers,
+                json=payload,
+                timeout=self._client.timeout
+            )
+            response.raise_for_status()
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    try:
+                        data = json.loads(line.decode('utf-8'))
+                        if 'choices' in data and len(data['choices']) > 0:
+                            content = data['choices'][0].get('delta', {}).get('content', '')
+                            if content:
+                                full_response += content
+                    except json.JSONDecodeError:
+                        continue
+
+            # Create usage statistics (estimated)
+            prompt_tokens = len(payload["query"]) // 4
+            completion_tokens = len(full_response) // 4
+            total_tokens = prompt_tokens + completion_tokens
+
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_response
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"{RED}Error during ExaChat non-stream request: {e}{RESET}")
+            raise IOError(f"ExaChat request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'ExaChat'):
+        self.completions = Completions(client)
+
+class ExaChat(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for ExaChat API.
+
+    Usage:
+        client = ExaChat()
+        response = client.chat.completions.create(
+            model="exaanswer",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        # ExaAnswer Models
+        "exaanswer",
+
+        # Gemini Models
+        "gemini-2.0-flash",
+        "gemini-2.0-flash-exp-image-generation",
+        "gemini-2.0-flash-thinking-exp-01-21",
+        "gemini-2.5-pro-exp-03-25",
+        "gemini-2.0-pro-exp-02-05",
+
+        # OpenRouter Models
+        "mistralai/mistral-small-3.1-24b-instruct:free",
+        "deepseek/deepseek-r1:free",
+        "deepseek/deepseek-chat-v3-0324:free",
+        "google/gemma-3-27b-it:free",
+        "meta-llama/llama-4-maverick:free",
+
+        # Groq Models
+        "deepseek-r1-distill-llama-70b",
+        "deepseek-r1-distill-qwen-32b",
+        "gemma2-9b-it",
+        "llama-3.1-8b-instant",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.3-70b-specdec",
+        "llama-3.3-70b-versatile",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "qwen-2.5-32b",
+        "qwen-2.5-coder-32b",
+        "qwen-qwq-32b",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+
+        # Cerebras Models
+        "llama3.1-8b",
+        "llama-3.3-70b"
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        temperature: float = 0.5,
+        top_p: float = 1.0
+    ):
+        """
+        Initialize the ExaChat client.
+
+        Args:
+            timeout: Request timeout in seconds.
+            temperature: Temperature for response generation.
+            top_p: Top-p sampling parameter.
+        """
+        self.timeout = timeout
+        self.temperature = temperature
+        self.top_p = top_p
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://exa-chat.vercel.app",
+            "referer": "https://exa-chat.vercel.app/",
+            "user-agent": agent.random(),
+        }
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.cookies.update({"session": uuid.uuid4().hex})
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def _get_endpoint(self, provider: str) -> str:
+        """Get the API endpoint for the specified provider."""
+        return MODEL_CONFIGS[provider]["endpoint"]
+
+    def _get_provider_from_model(self, model: str) -> str:
+        """Determine the provider based on the model name."""
+        for provider, config in MODEL_CONFIGS.items():
+            if model in config["models"]:
+                return provider
+
+        # If model not found, use a default model
+        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+        return "exaanswer"
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Ensure the model name is in the correct format.
+        """
+        if model in self.AVAILABLE_MODELS:
+            return model
+
+        # Try to find a matching model
+        for available_model in self.AVAILABLE_MODELS:
+            if model.lower() in available_model.lower():
+                return available_model
+
+        # Default to exaanswer if no match
+        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+        return "exaanswer"
+
+
+# Simple test if run directly
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test a subset of models to avoid excessive API calls
+    test_models = [
+        "exaanswer",
+        "gemini-2.0-flash",
+        "deepseek/deepseek-r1:free",
+        "llama-3.1-8b-instant",
+        "llama3.1-8b"
+    ]
+
+    for model in test_models:
+        try:
+            client = ExaChat(timeout=60)
+            # Test with a simple conversation to demonstrate format_prompt usage
+            response = client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": "You are a helpful assistant."},
+                    {"role": "user", "content": "Say 'Hello' in one word"},
+                ],
+                stream=False
+            )
+
+            if response and response.choices and response.choices[0].message.content:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response.choices[0].message.content.strip()
+                display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")