webscout 8.1__py3-none-any.whl → 8.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (60):
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  14. webscout/Provider/AISEARCH/__init__.py +2 -1
  15. webscout/Provider/AISEARCH/scira_search.py +8 -4
  16. webscout/Provider/ExaChat.py +18 -8
  17. webscout/Provider/GithubChat.py +5 -1
  18. webscout/Provider/Glider.py +4 -2
  19. webscout/Provider/OPENAI/__init__.py +9 -1
  20. webscout/Provider/OPENAI/c4ai.py +22 -2
  21. webscout/Provider/OPENAI/chatgpt.py +549 -0
  22. webscout/Provider/OPENAI/deepinfra.py +1 -13
  23. webscout/Provider/OPENAI/e2b.py +1192 -0
  24. webscout/Provider/OPENAI/exaai.py +1 -16
  25. webscout/Provider/OPENAI/exachat.py +20 -8
  26. webscout/Provider/OPENAI/freeaichat.py +1 -4
  27. webscout/Provider/OPENAI/glider.py +3 -1
  28. webscout/Provider/OPENAI/llmchatco.py +3 -1
  29. webscout/Provider/OPENAI/opkfc.py +488 -0
  30. webscout/Provider/OPENAI/scirachat.py +11 -7
  31. webscout/Provider/OPENAI/standardinput.py +425 -0
  32. webscout/Provider/OPENAI/textpollinations.py +285 -0
  33. webscout/Provider/OPENAI/toolbaz.py +405 -0
  34. webscout/Provider/OPENAI/typegpt.py +1 -16
  35. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  36. webscout/Provider/OPENAI/venice.py +1 -16
  37. webscout/Provider/OPENAI/writecream.py +156 -0
  38. webscout/Provider/OPENAI/x0gpt.py +2 -20
  39. webscout/Provider/OPENAI/yep.py +2 -4
  40. webscout/Provider/StandardInput.py +278 -0
  41. webscout/Provider/TextPollinationsAI.py +27 -28
  42. webscout/Provider/Writecream.py +211 -0
  43. webscout/Provider/WritingMate.py +197 -0
  44. webscout/Provider/Youchat.py +30 -26
  45. webscout/Provider/__init__.py +10 -2
  46. webscout/Provider/koala.py +2 -2
  47. webscout/Provider/llmchatco.py +5 -0
  48. webscout/Provider/scira_chat.py +5 -2
  49. webscout/Provider/scnet.py +187 -0
  50. webscout/Provider/toolbaz.py +320 -0
  51. webscout/Provider/uncovr.py +3 -3
  52. webscout/conversation.py +32 -32
  53. webscout/version.py +1 -1
  54. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/METADATA +54 -3
  55. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/RECORD +59 -33
  56. webscout-8.2.1.dist-info/entry_points.txt +5 -0
  57. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/top_level.txt +1 -0
  58. webscout-8.1.dist-info/entry_points.txt +0 -3
  59. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/LICENSE.md +0 -0
  60. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/WHEEL +0 -0
@@ -15,22 +15,7 @@ from .utils import (
15
15
  try:
16
16
  from webscout.litagent import LitAgent
17
17
  except ImportError:
18
- # Define a dummy LitAgent if webscout is not installed or accessible
19
- class LitAgent:
20
- def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
21
- # Return minimal default headers if LitAgent is unavailable
22
- print("Warning: LitAgent not found. Using default minimal headers.")
23
- return {
24
- "accept": "*/*",
25
- "accept_language": "en-US,en;q=0.9",
26
- "platform": "Windows",
27
- "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
28
- "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
29
- "browser_type": browser,
30
- }
31
-
32
- def random(self) -> str:
33
- return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
18
+ print("Warning: LitAgent not found. Functionality may be limited.")
34
19
 
35
20
  # --- TypeGPT Client ---
36
21
 
@@ -0,0 +1,455 @@
1
+ import time
2
+ import uuid
3
+ import re
4
+ import json
5
+ import cloudscraper
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ from webscout.litagent import LitAgent
9
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
+ from .utils import (
11
+ ChatCompletion,
12
+ ChatCompletionChunk,
13
+ Choice,
14
+ ChatCompletionMessage,
15
+ ChoiceDelta,
16
+ CompletionUsage,
17
+ format_prompt,
18
+ get_system_prompt,
19
+ get_last_user_message
20
+ )
21
+
22
+ # ANSI escape codes for formatting
23
+ BOLD = "\033[1m"
24
+ RED = "\033[91m"
25
+ RESET = "\033[0m"
26
+
27
class Completions(BaseCompletions):
    """Completions endpoint for the UncovrAI client (openai-compatible surface)."""

    def __init__(self, client: 'UncovrAI'):
        # Back-reference to the owning client for session, url, timeout and config.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion using the UncovrAI API.

        Args:
            model: The model to use for completion (validated against
                ``UncovrAI.AVAILABLE_MODELS``).
            messages: A list of messages in the conversation.
            max_tokens: Maximum number of tokens to generate (accepted for
                API compatibility; not forwarded to UncovrAI).
            stream: Whether to stream the response.
            temperature: Controls randomness; a 0-1 value mapped onto
                UncovrAI's 0-100 scale.
            top_p: Controls diversity (not directly used by UncovrAI).
            **kwargs: Additional parameters; recognised keys are
                ``creativity``, ``selected_focus`` and ``selected_tools``.

        Returns:
            A ChatCompletion object, or a generator of ChatCompletionChunk
            objects when ``stream=True``.

        Raises:
            ValueError: If *model* is not an available model.
            IOError: If the underlying HTTP request fails.
        """
        # Validate model
        if model not in self._client.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")

        # Map temperature to UncovrAI's scale (0-100).
        # Default to 32 (medium) if not provided.
        uncovr_temperature = 32
        if temperature is not None:
            # Map from 0-1 scale to 0-100 scale
            uncovr_temperature = int(temperature * 100)
            # Ensure it's within bounds
            uncovr_temperature = max(0, min(100, uncovr_temperature))

        # Map creativity from kwargs or use default
        creativity = kwargs.get("creativity", "medium")

        # Get focus and tools from kwargs or use defaults
        selected_focus = kwargs.get("selected_focus", ["web"])
        selected_tools = kwargs.get("selected_tools", ["quick-cards"])

        # Generate request ID and timestamp
        request_id = str(uuid.uuid4())
        created_time = int(time.time())

        # Format the conversation using utility functions
        conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)

        # Prepare the request payload
        payload = {
            "content": conversation_prompt,
            "chatId": self._client.chat_id,
            "userMessageId": str(uuid.uuid4()),
            "ai_config": {
                "selectedFocus": selected_focus,
                "selectedTools": selected_tools,
                "agentId": "chat",
                "modelId": model,
                "temperature": uncovr_temperature,
                "creativity": creativity
            }
        }

        # Handle streaming response
        if stream:
            return self._handle_streaming_response(
                payload=payload,
                model=model,
                request_id=request_id,
                created_time=created_time
            )

        # Handle non-streaming response
        return self._handle_non_streaming_response(
            payload=payload,
            model=model,
            request_id=request_id,
            created_time=created_time
        )

    def _handle_streaming_response(
        self,
        *,
        payload: Dict[str, Any],
        model: str,
        request_id: str,
        created_time: int
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Handle streaming response from UncovrAI API.

        Yields one ChatCompletionChunk per content line, then a final chunk
        with ``finish_reason="stop"``. On 403/429 the client identity is
        refreshed once and the request retried.
        """
        try:
            with self._client.session.post(
                self._client.url,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            ) as response:
                if response.status_code != 200:
                    # If we get a non-200 response, try refreshing our identity once
                    if response.status_code in [403, 429]:
                        self._client.refresh_identity()
                        # Retry with new identity
                        with self._client.session.post(
                            self._client.url,
                            json=payload,
                            stream=True,
                            timeout=self._client.timeout
                        ) as retry_response:
                            if not retry_response.ok:
                                raise IOError(
                                    f"Failed to generate response after identity refresh - "
                                    f"({retry_response.status_code}, {retry_response.reason}) - "
                                    f"{retry_response.text}"
                                )
                            # NOTE(review): retry_response is bound here but its
                            # `with` block exits before the body is iterated below,
                            # which may close the connection — confirm the retry
                            # path actually streams.
                            response = retry_response
                    else:
                        raise IOError(f"Request failed with status code {response.status_code}")

                # Process the streaming response
                streaming_text = ""
                for line in response.iter_lines():
                    if line:
                        try:
                            line = line.decode('utf-8')

                            # Use regex to match content messages
                            # (lines shaped like `0:"..."` — presumably a
                            # data-stream wire format; verify against the API).
                            content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
                            if content_match:  # Content message
                                content = content_match.group(1)
                                # Format the content to handle escape sequences
                                content = self._client.format_text(content)
                                streaming_text += content

                                # Create a chunk for this part of the response
                                delta = ChoiceDelta(content=content)
                                choice = Choice(
                                    index=0,
                                    delta=delta,
                                    finish_reason=None
                                )
                                chunk = ChatCompletionChunk(
                                    id=request_id,
                                    choices=[choice],
                                    created=created_time,
                                    model=model
                                )

                                yield chunk

                            # Check for error messages
                            error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
                            if error_match:
                                error_msg = error_match.group(1)
                                raise IOError(f"API Error: {error_msg}")

                        except (json.JSONDecodeError, UnicodeDecodeError):
                            # Skip undecodable lines; IOError above is not
                            # caught here and propagates to the outer handler.
                            continue

                # Yield a final chunk with finish_reason="stop"
                delta = ChoiceDelta()
                choice = Choice(
                    index=0,
                    delta=delta,
                    finish_reason="stop"
                )
                chunk = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model
                )
                yield chunk

        except Exception as e:
            print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
            raise IOError(f"UncovrAI streaming request failed: {e}") from e

    def _handle_non_streaming_response(
        self,
        *,
        payload: Dict[str, Any],
        model: str,
        request_id: str,
        created_time: int
    ) -> ChatCompletion:
        """Handle non-streaming response from UncovrAI API.

        Buffers all content lines into a single message and returns a
        ChatCompletion with approximate token usage (len // 4 heuristic).
        """
        try:
            response = self._client.session.post(
                self._client.url,
                json=payload,
                timeout=self._client.timeout
            )

            if response.status_code != 200:
                # Same one-shot identity refresh as the streaming path.
                if response.status_code in [403, 429]:
                    self._client.refresh_identity()
                    response = self._client.session.post(
                        self._client.url,
                        json=payload,
                        timeout=self._client.timeout
                    )
                    if not response.ok:
                        raise IOError(
                            f"Failed to generate response after identity refresh - "
                            f"({response.status_code}, {response.reason}) - "
                            f"{response.text}"
                        )
                else:
                    raise IOError(f"Request failed with status code {response.status_code}")

            full_response = ""
            for line in response.iter_lines():
                if line:
                    try:
                        line = line.decode('utf-8')
                        content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
                        if content_match:
                            content = content_match.group(1)
                            full_response += content

                        # Check for error messages
                        error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
                        if error_match:
                            error_msg = error_match.group(1)
                            raise IOError(f"API Error: {error_msg}")

                    except (json.JSONDecodeError, UnicodeDecodeError):
                        continue

            # Format the full response to handle escape sequences
            full_response = self._client.format_text(full_response)

            # Create message, choice, and usage objects
            message = ChatCompletionMessage(
                role="assistant",
                content=full_response
            )

            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Estimate token usage (this is approximate)
            prompt_tokens = len(payload["content"]) // 4
            completion_tokens = len(full_response) // 4
            total_tokens = prompt_tokens + completion_tokens

            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except Exception as e:
            print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
            raise IOError(f"UncovrAI request failed: {e}") from e
304
+
305
class Chat(BaseChat):
    """Chat namespace that exposes the completions endpoint."""

    def __init__(self, client: 'UncovrAI'):
        # Wire the completions interface to the owning client.
        self.completions = Completions(client)
308
+
309
class UncovrAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Uncovr AI API.

    Usage:
        client = UncovrAI()
        response = client.chat.completions.create(
            model="default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "default",
        "gpt-4o-mini",
        "gemini-2-flash",
        "gemini-2-flash-lite",
        "groq-llama-3-1-8b",
        "o3-mini",
        "deepseek-r1-distill-qwen-32b",
        # The following models are not available in the free plan:
        # "claude-3-7-sonnet",
        # "gpt-4o",
        # "claude-3-5-sonnet-v2",
        # "deepseek-r1-distill-llama-70b",
        # "gemini-2-flash-lite-preview",
        # "qwen-qwq-32b"
    ]

    def __init__(
        self,
        timeout: int = 30,
        browser: str = "chrome",
        chat_id: Optional[str] = None,
        user_id: Optional[str] = None,
        proxies: Optional[Dict[str, str]] = None
    ):
        """
        Initialize the UncovrAI client.

        Args:
            timeout: Request timeout in seconds
            browser: Browser name for LitAgent to generate fingerprint
            chat_id: Optional chat ID (will generate one if not provided)
            user_id: Optional user ID (will generate one if not provided)
            proxies: Optional proxy configuration (defaults to no proxies).
                Note: previously declared as a mutable default argument
                (``proxies: dict = {}``); fixed to ``None`` to avoid the
                shared-mutable-default pitfall. Passing ``{}`` still works.
        """
        self.url = "https://uncovr.app/api/workflows/chat"
        self.timeout = timeout

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        # Use fingerprinting to create a consistent browser identity
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://uncovr.app",
            "Referer": "https://uncovr.app/",
            # Fall back to a static client-hint string when the fingerprint
            # does not supply one.
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        # Use cloudscraper to bypass Cloudflare protection
        self.session = cloudscraper.create_scraper()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies or {})

        # Set chat and user IDs
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

        # Initialize chat interface
        self.chat = Chat(self)

    def refresh_identity(self, browser: Optional[str] = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint;
                defaults to the browser type of the current fingerprint.

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    def format_text(self, text: str) -> str:
        """
        Format text by replacing escaped newlines with actual newlines.

        Args:
            text: Text to format

        Returns:
            Formatted text (original text if decoding fails).
        """
        # Use a more comprehensive approach to handle all escape sequences
        try:
            # First handle double backslashes to avoid issues
            text = text.replace('\\\\', '\\')

            # Handle common escape sequences
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            text = text.replace("\\'", "'")

            # Handle any remaining escape sequences using JSON decoding
            try:
                # Add quotes to make it a valid JSON string
                json_str = f'"{text}"'
                # Use json module to decode all escape sequences
                decoded = json.loads(json_str)
                return decoded
            except json.JSONDecodeError:
                # If JSON decoding fails, return the text with the replacements we've already done
                return text
        except Exception as e:
            # If any error occurs, return the original text
            print(f"{RED}Warning: Error formatting text: {e}{RESET}")
            return text
@@ -15,22 +15,7 @@ from .utils import (
15
15
  try:
16
16
  from webscout.litagent import LitAgent
17
17
  except ImportError:
18
- # Define a dummy LitAgent if webscout is not installed or accessible
19
- class LitAgent:
20
- def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
21
- # Return minimal default headers if LitAgent is unavailable
22
- print("Warning: LitAgent not found. Using default minimal headers.")
23
- return {
24
- "accept": "*/*",
25
- "accept_language": "en-US,en;q=0.9",
26
- "platform": "Windows",
27
- "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
28
- "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
29
- "browser_type": browser,
30
- }
31
-
32
- def random(self) -> str:
33
- return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
18
+ print("Warning: LitAgent not found. Some functionality may be limited.")
34
19
 
35
20
  # --- Venice Client ---
36
21
 
@@ -0,0 +1,156 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import base classes and utility structures
8
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
+ from .utils import (
10
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
+ ChatCompletionMessage, CompletionUsage
12
+ )
13
+
14
+ # Attempt to import LitAgent, fallback if not available
15
+ try:
16
+ from webscout.litagent import LitAgent
17
+ except ImportError:
18
+ print("Warning: LitAgent not found. Using default user agent.")
19
+
20
class Completions(BaseCompletions):
    """Completions endpoint for the Writecream client (openai-compatible)."""

    def __init__(self, client: 'Writecream'):
        # Back-reference to the owning client for session/headers/timeout.
        self._client = client

    def create(
        self,
        *,
        model: Optional[str] = None,  # Not used by Writecream, for compatibility
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,  # Not used by Writecream
        stream: bool = False,
        temperature: Optional[float] = None,  # Not used by Writecream
        top_p: Optional[float] = None,  # Not used by Writecream
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.

        Bug fix: the signature previously read ``def create(*, self, ...)``,
        which made ``self`` keyword-only and broke every normal call such as
        ``client.chat.completions.create(messages=...)`` with
        "missing keyword-only argument: 'self'". ``self`` is now the ordinary
        first positional parameter; all other parameters remain keyword-only.

        Args:
            model: Ignored; Writecream exposes a single model.
            messages: Conversation messages, forwarded to the API as-is.
            stream: When True, the full response is delivered as a
                single chunk followed by a stop chunk (the API itself
                does not stream).

        Returns:
            A ChatCompletion, or a generator of ChatCompletionChunk
            objects when ``stream=True``.
        """
        payload = messages
        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())
        if stream:
            return self._create_stream(request_id, created_time, payload)
        else:
            return self._create_non_stream(request_id, created_time, payload)

    def _create_stream(
        self, request_id: str, created_time: int, payload: List[Dict[str, str]]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Emulate streaming: fetch the full response, yield it as one chunk."""
        # Writecream does not support streaming, so yield the full response as a single chunk
        completion = self._create_non_stream(request_id, created_time, payload)
        content = completion.choices[0].message.content
        # Yield as a single chunk
        delta = ChoiceDelta(content=content)
        choice = Choice(index=0, delta=delta, finish_reason=None)
        chunk = ChatCompletionChunk(
            id=request_id,
            choices=[choice],
            created=created_time,
            model="writecream",
        )
        yield chunk
        # Final chunk with finish_reason
        delta = ChoiceDelta(content=None)
        choice = Choice(index=0, delta=delta, finish_reason="stop")
        chunk = ChatCompletionChunk(
            id=request_id,
            choices=[choice],
            created=created_time,
            model="writecream",
        )
        yield chunk

    def _create_non_stream(
        self, request_id: str, created_time: int, payload: List[Dict[str, str]]
    ) -> ChatCompletion:
        """Perform the GET request and build a ChatCompletion.

        Raises:
            IOError: If the request or response parsing fails.
        """
        try:
            # The API takes the whole message list as a JSON-encoded query param.
            params = {
                "query": json.dumps(payload),
                "link": "writecream.com"
            }
            response = self._client.session.get(
                self._client.base_url,
                params=params,
                headers=self._client.headers,
                timeout=self._client.timeout
            )
            response.raise_for_status()
            data = response.json()
            # Extract the response content according to the new API format
            content = data.get("response_content", "")
            # Estimate tokens (word counts — approximate only)
            prompt_tokens = sum(len(m.get("content", "").split()) for m in payload)
            completion_tokens = len(content.split())
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
            message = ChatCompletionMessage(role="assistant", content=content)
            choice = Choice(index=0, message=message, finish_reason="stop")
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model="writecream",
                usage=usage
            )
            return completion
        except Exception as e:
            print(f"Error during Writecream request: {e}")
            raise IOError(f"Writecream request failed: {e}") from e
113
+
114
class Chat(BaseChat):
    """Chat namespace that exposes the completions endpoint."""

    def __init__(self, client: 'Writecream'):
        # Wire the completions interface to the owning client.
        self.completions = Completions(client)
117
+
118
class Writecream(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Writecream API.

    Usage:
        client = Writecream()
        response = client.chat.completions.create(
            messages=[{"role": "system", "content": "You are a helpful assistant."},
                      {"role": "user", "content": "What is the capital of France?"}]
        )
        print(response.choices[0].message.content)
    """

    # Writecream exposes a single, unnamed backend model.
    AVAILABLE_MODELS = ["writecream"]

    def __init__(self, timeout: Optional[int] = 30, browser: str = "chrome"):
        """Set up the HTTP session, identity headers and chat interface.

        Args:
            timeout: Request timeout in seconds.
            browser: Accepted for interface parity with other providers;
                the user agent comes from ``LitAgent.random()``.
        """
        self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
        self.timeout = timeout
        self.session = requests.Session()

        # Generate a randomized user agent for each client instance.
        ua_provider = LitAgent()
        self.headers = {
            "User-Agent": ua_provider.random(),
            "Referer": "https://www.writecream.com/chatgpt-chat/"
        }
        self.session.headers.update(self.headers)

        self.chat = Chat(self)

    def convert_model_name(self, model: str) -> str:
        """Map any requested model name onto the single supported model."""
        return "writecream"
146
+
147
# Smoke test: performs a live request against the Writecream endpoint
# when this module is executed directly.
if __name__ == "__main__":
    demo_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"}
    ]
    client = Writecream()
    response = client.chat.completions.create(messages=demo_messages)
    print(response.choices[0].message.content)
@@ -12,26 +12,8 @@ from .utils import (
12
12
  ChatCompletionMessage, CompletionUsage
13
13
  )
14
14
 
15
- # Attempt to import LitAgent, fallback if not available
16
- try:
17
- from webscout.litagent import LitAgent
18
- except ImportError:
19
- # Define a dummy LitAgent if webscout is not installed or accessible
20
- class LitAgent:
21
- def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
22
- # Return minimal default headers if LitAgent is unavailable
23
- print("Warning: LitAgent not found. Using default minimal headers.")
24
- return {
25
- "accept": "*/*",
26
- "accept_language": "en-US,en;q=0.9",
27
- "platform": "Windows",
28
- "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
29
- "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
30
- "browser_type": browser,
31
- }
32
-
33
- def random(self) -> str:
34
- return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
15
+ # Import LitAgent
16
+ from webscout.litagent import LitAgent
35
17
 
36
18
  # --- X0GPT Client ---
37
19