webscout 8.0__py3-none-any.whl → 8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (45) hide show
  1. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  2. webscout/Provider/AISEARCH/ISou.py +1 -1
  3. webscout/Provider/AISEARCH/__init__.py +2 -1
  4. webscout/Provider/AISEARCH/felo_search.py +1 -1
  5. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +1 -1
  7. webscout/Provider/AISEARCH/iask_search.py +436 -0
  8. webscout/Provider/AISEARCH/scira_search.py +1 -1
  9. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  10. webscout/Provider/ExaAI.py +1 -1
  11. webscout/Provider/Jadve.py +2 -2
  12. webscout/Provider/OPENAI/__init__.py +17 -0
  13. webscout/Provider/OPENAI/base.py +46 -0
  14. webscout/Provider/OPENAI/c4ai.py +347 -0
  15. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  16. webscout/Provider/OPENAI/deepinfra.py +284 -0
  17. webscout/Provider/OPENAI/exaai.py +419 -0
  18. webscout/Provider/OPENAI/exachat.py +421 -0
  19. webscout/Provider/OPENAI/freeaichat.py +355 -0
  20. webscout/Provider/OPENAI/glider.py +314 -0
  21. webscout/Provider/OPENAI/heckai.py +337 -0
  22. webscout/Provider/OPENAI/llmchatco.py +325 -0
  23. webscout/Provider/OPENAI/netwrck.py +348 -0
  24. webscout/Provider/OPENAI/scirachat.py +459 -0
  25. webscout/Provider/OPENAI/sonus.py +294 -0
  26. webscout/Provider/OPENAI/typegpt.py +361 -0
  27. webscout/Provider/OPENAI/utils.py +211 -0
  28. webscout/Provider/OPENAI/venice.py +428 -0
  29. webscout/Provider/OPENAI/wisecat.py +381 -0
  30. webscout/Provider/OPENAI/x0gpt.py +389 -0
  31. webscout/Provider/OPENAI/yep.py +329 -0
  32. webscout/Provider/Venice.py +1 -1
  33. webscout/Provider/__init__.py +6 -6
  34. webscout/Provider/scira_chat.py +13 -10
  35. webscout/Provider/typegpt.py +3 -184
  36. webscout/prompt_manager.py +2 -1
  37. webscout/version.py +1 -1
  38. webscout-8.1.dist-info/METADATA +683 -0
  39. {webscout-8.0.dist-info → webscout-8.1.dist-info}/RECORD +43 -23
  40. webscout/Provider/flowith.py +0 -207
  41. webscout-8.0.dist-info/METADATA +0 -995
  42. {webscout-8.0.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  43. {webscout-8.0.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  44. {webscout-8.0.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.0.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,460 @@
1
+ import time
2
+ import uuid
3
+ import cloudscraper
4
+ import json
5
+ import re
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ # Import base classes and utility structures
9
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from .utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage
13
+ )
14
+
15
+ # Attempt to import LitAgent, fallback if not available
16
try:
    from webscout.litagent import LitAgent
except ImportError:
    # Fallback stub used when the webscout package (and its LitAgent helper)
    # is not importable: supplies one fixed Chrome-on-Windows fingerprint.
    class LitAgent:
        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
            """Return a static, minimal browser-fingerprint dictionary."""
            print("Warning: LitAgent not found. Using default minimal headers.")
            fingerprint: Dict[str, Any] = {
                "accept": "*/*",
                "accept_language": "en-US,en;q=0.9",
                "platform": "Windows",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "user_agent": (
                    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                    "AppleWebKit/537.36 (KHTML, like Gecko) "
                    "Chrome/127.0.0.0 Safari/537.36"
                ),
            }
            # Echo back the requested browser so callers can tell which
            # profile this fingerprint claims to be.
            fingerprint["browser_type"] = browser
            return fingerprint
32
+
33
+ # --- ChatGPTClone Client ---
34
+
35
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` interface for ChatGPTClone."""

    def __init__(self, client: 'ChatGPTClone'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Args:
            model: Requested model name; mapped via client.convert_model_name.
            messages: Conversation as [{"role": ..., "content": ...}, ...].
            max_tokens: Optional completion-token cap; forwarded only when > 0.
            stream: When True, return a generator of ChatCompletionChunk.
            temperature: Optional sampling temperature, forwarded if given.
            top_p: Optional nucleus-sampling value, forwarded if given.
            **kwargs: Extra fields merged verbatim into the request payload.

        Returns:
            ChatCompletion, or a generator of ChatCompletionChunk when streaming.
        """
        payload: Dict[str, Any] = {
            "messages": messages,
            "model": self._client.convert_model_name(model),
        }
        if max_tokens is not None and max_tokens > 0:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        # Pass any additional caller-supplied options straight through.
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _post(self, payload: Dict[str, Any]):
        """POST *payload* to /api/chat, refreshing identity once on 403/429.

        Raises:
            IOError: when the (possibly retried) response is not OK.
        """
        def send():
            return self._client.session.post(
                f"{self._client.url}/api/chat",
                headers=self._client.headers,
                cookies=self._client.cookies,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
            )

        response = send()
        if not response.ok:
            if response.status_code in [403, 429]:
                # Cloudflare / rate-limit style rejection: rotate the browser
                # fingerprint and cookies once, then retry a single time.
                self._client.refresh_identity()
                response = send()
                if not response.ok:
                    raise IOError(
                        f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                    )
            else:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
        return response

    @staticmethod
    def _estimate_prompt_tokens(payload: Dict[str, Any]) -> int:
        """Rough token estimate: whitespace-split word count of all messages."""
        return sum(
            len(msg.get("content", "").split())
            for msg in payload.get("messages", [])
        )

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ChatCompletionChunk objects parsed from the streamed response."""
        try:
            response = self._post(payload)

            # Token accounting is an estimate only: prompt side by word count,
            # completion side as one "token" per streamed frame.
            prompt_tokens = self._estimate_prompt_tokens(payload)
            completion_tokens = 0

            for line in response.iter_lines():
                if not line:
                    continue
                decoded_line = line.decode('utf-8').strip()

                # The upstream streams Vercel-AI-style frames like 0:"...".
                # NOTE(review): the non-greedy regex stops at the first quote,
                # so frames containing escaped quotes may be truncated —
                # inherited limitation, confirm against live responses.
                match = re.search(r'0:"(.*?)"', decoded_line)
                if not match:
                    continue

                # Unescape the frame content (e.g. \n -> newline).
                content = self._client.format_text(match.group(1))
                completion_tokens += 1

                delta = ChoiceDelta(
                    content=content,
                    role="assistant",
                    tool_calls=None
                )
                choice = Choice(
                    index=0,
                    delta=delta,
                    finish_reason=None,
                    logprobs=None
                )
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=None
                )

            # Terminal chunk signalling completion, OpenAI style.
            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
            choice = Choice(
                index=0,
                delta=delta,
                finish_reason="stop",
                logprobs=None
            )
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )

        except Exception as e:
            print(f"Error during ChatGPTClone stream request: {e}")
            raise IOError(f"ChatGPTClone request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Collect the full streamed response and return one ChatCompletion."""
        try:
            # The API only streams; read it fully and assemble the text.
            response = self._post(payload)

            full_text = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    match = re.search(r'0:"(.*?)"', line)
                    if match:
                        full_text += match.group(1)

            # Unescape once over the assembled text.
            full_text = self._client.format_text(full_text)

            prompt_tokens = self._estimate_prompt_tokens(payload)
            completion_tokens = len(full_text.split())

            message = ChatCompletionMessage(
                role="assistant",
                content=full_text
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except Exception as e:
            print(f"Error during ChatGPTClone non-stream request: {e}")
            raise IOError(f"ChatGPTClone request failed: {e}") from e
309
+
310
class Chat(BaseChat):
    """Namespace object exposing the ``completions`` sub-resource, OpenAI style."""

    def __init__(self, client: 'ChatGPTClone'):
        # Mirrors openai's client.chat.completions access pattern.
        self.completions = Completions(client)
313
+
314
class ChatGPTClone(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for ChatGPT Clone API.

    Usage:
        client = ChatGPTClone()
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    url = "https://chatgpt-clone-ten-nu.vercel.app"
    AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the ChatGPTClone client.

        Args:
            timeout: Request timeout in seconds (None for no timeout)
            browser: Browser to emulate in user agent
        """
        self.timeout = timeout
        self.temperature = 0.6  # Default temperature
        self.top_p = 0.7  # Default top_p

        # Use cloudscraper to bypass Cloudflare protection
        self.session = cloudscraper.create_scraper()

        # Initialize LitAgent for user agent / fingerprint generation
        agent = LitAgent()
        self.fingerprint = agent.generate_fingerprint(browser)

        # Build request headers from the generated fingerprint.
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "DNT": "1",
            "Origin": self.url,
            "Referer": f"{self.url}/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        }

        # Create session cookies with unique identifiers
        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}

        # Mirror the headers onto the scraper session so every request,
        # including cloudscraper's own challenge handling, stays consistent.
        for header, value in self.headers.items():
            self.session.headers[header] = value

        # Initialize the chat interface
        self.chat = Chat(self)

    def refresh_identity(self, browser: Optional[str] = None):
        """Refreshes the browser identity fingerprint.

        Args:
            browser: Browser profile to emulate; defaults to the one used by
                the current fingerprint.

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = LitAgent().generate_fingerprint(browser)

        # Update headers with new fingerprint
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        # Generate new cookies so the server sees a fresh session.
        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}

        return self.fingerprint

    def format_text(self, text: str) -> str:
        """
        Format text by replacing escaped sequences with their literal characters.

        Args:
            text: Text to format

        Returns:
            Formatted text
        """
        try:
            # First handle double backslashes to avoid issues
            text = text.replace('\\\\', '\\')

            # Handle common escape sequences
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            # BUG FIX: this previously replaced the three-character sequence
            # \'' (backslash, quote, quote) so \' escapes were never handled;
            # the intended target is the two-character escape \'.
            text = text.replace("\\'", "'")

            # Fallback: let the json module decode any remaining escape
            # sequences. This usually fails (JSONDecodeError) once real
            # newlines/quotes are present, in which case we keep the text
            # produced by the manual replacements above.
            try:
                json_str = f'"{text}"'
                decoded = json.loads(json_str)
                return decoded
            except json.JSONDecodeError:
                return text
        except Exception as e:
            # If any error occurs, return the original text
            print(f"Warning: Error formatting text: {e}")
            return text

    def convert_model_name(self, model: str) -> str:
        """
        Convert model names to ones supported by ChatGPTClone.

        Args:
            model: Model name to convert

        Returns:
            ChatGPTClone model name
        """
        # If the model is already a valid ChatGPTClone model, return it
        if model in self.AVAILABLE_MODELS:
            return model

        # Map similar models to supported ones by prefix
        if model.startswith("gpt-4"):
            return "gpt-4"
        elif model.startswith("gpt-3.5"):
            return "gpt-3.5-turbo"

        # Default to the most capable model
        print(f"Warning: Unknown model '{model}'. Using 'gpt-4' instead.")
        return "gpt-4"