webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184) hide show
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -1,433 +1,433 @@
1
- import json
2
- import time
3
- import uuid
4
- import re
5
- import cloudscraper
6
- from datetime import datetime
7
- from typing import List, Dict, Optional, Union, Generator, Any
8
-
9
-
10
- # Import base classes and utility structures
11
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
12
- from .utils import (
13
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
14
- ChatCompletionMessage, CompletionUsage,
15
- format_prompt, get_system_prompt, get_last_user_message
16
- )
17
-
18
- # Import LitAgent for browser fingerprinting
19
- try:
20
- from webscout.litagent import LitAgent
21
- except ImportError:
22
- # Define a dummy LitAgent if webscout is not installed or accessible
23
- class LitAgent:
24
- def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
25
- # Return minimal default headers if LitAgent is unavailable
26
- print("Warning: LitAgent not found. Using default minimal headers.")
27
- return {
28
- "accept": "*/*",
29
- "accept_language": "en-US,en;q=0.9",
30
- "platform": "Windows",
31
- "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
32
- "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
33
- "browser_type": browser,
34
- }
35
-
36
- # --- StandardInput Client ---
37
-
38
- class Completions(BaseCompletions):
39
- def __init__(self, client: 'StandardInput'):
40
- self._client = client
41
-
42
- def create(
43
- self,
44
- *,
45
- model: str,
46
- messages: List[Dict[str, str]],
47
- max_tokens: Optional[int] = None,
48
- stream: bool = False,
49
- temperature: Optional[float] = None,
50
- top_p: Optional[float] = None,
51
- **kwargs: Any
52
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
53
- """Create a chat completion."""
54
- # Validate model
55
- if model not in self._client.AVAILABLE_MODELS and model not in self._client.MODEL_MAPPING.values():
56
- raise ValueError(f"Model {model} not supported. Choose from: {list(self._client.AVAILABLE_MODELS)}")
57
-
58
- # Map model name if needed
59
- internal_model = self._client.MODEL_MAPPING.get(model, model)
60
-
61
- # Extract reasoning flag from kwargs
62
- enable_reasoning = kwargs.get("enable_reasoning", False)
63
-
64
- # Prepare request
65
- request_id = str(uuid.uuid4())
66
- created_time = int(time.time())
67
-
68
- # Extract system message and user message using utility functions
69
- system_content = get_system_prompt(messages)
70
- # Format the prompt for debugging purposes
71
- formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
72
- # Uncomment the line below for debugging
73
- # print(f"Formatted prompt:\n{formatted_prompt}")
74
-
75
- # Prepare the request payload
76
- payload = {
77
- "id": request_id,
78
- "messages": [
79
- {"role": "system", "content": system_content},
80
- {"role": "user", "content": formatted_prompt, "parts": [{"type": "text", "text": formatted_prompt}]}
81
- ],
82
- "modelId": internal_model,
83
- "enabledFeatures": ["reasoning"] if enable_reasoning or "reasoning" in internal_model else []
84
- }
85
-
86
- # Handle streaming vs non-streaming
87
- if stream:
88
- return self._stream_request(request_id, created_time, model, payload)
89
- else:
90
- return self._non_stream_request(request_id, created_time, model, payload)
91
-
92
- def _non_stream_request(
93
- self,
94
- request_id: str,
95
- created_time: int,
96
- model: str,
97
- payload: Dict[str, Any]
98
- ) -> ChatCompletion:
99
- """Handle non-streaming request."""
100
- try:
101
- # Make the request
102
- response = self._client.session.post(
103
- self._client.api_endpoint,
104
- cookies=self._client.cookies,
105
- json=payload,
106
- timeout=self._client.timeout
107
- )
108
-
109
- # Check for errors
110
- if response.status_code != 200:
111
- # Try to get response content for better error messages
112
- try:
113
- error_content = response.text
114
- except:
115
- error_content = "<could not read response content>"
116
-
117
- if response.status_code in [403, 429]:
118
- print(f"Received status code {response.status_code}, refreshing identity...")
119
- self._client._refresh_identity()
120
- response = self._client.session.post(
121
- self._client.api_endpoint,
122
- cookies=self._client.cookies,
123
- json=payload,
124
- timeout=self._client.timeout
125
- )
126
- if not response.ok:
127
- raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
128
- print("Identity refreshed successfully.")
129
- else:
130
- raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
131
-
132
- # Process the response
133
- full_response = ""
134
-
135
- # Process the streaming response to get the full text
136
- for line in response.iter_lines(decode_unicode=True):
137
- if line:
138
- try:
139
- # Extract content from the response
140
- match = re.search(r'0:"(.*?)"', line)
141
- if match:
142
- content = match.group(1)
143
- # Format the content to handle escape sequences
144
- content = self._client.format_text(content)
145
- full_response += content
146
- except:
147
- pass
148
-
149
- # Create the response objects
150
- message = ChatCompletionMessage(
151
- role="assistant",
152
- content=full_response
153
- )
154
-
155
- choice = Choice(
156
- index=0,
157
- message=message,
158
- finish_reason="stop"
159
- )
160
-
161
- # Estimate token usage (very rough estimate)
162
- prompt_tokens = len(str(payload).split()) * 2
163
- completion_tokens = len(full_response.split()) * 2
164
- total_tokens = prompt_tokens + completion_tokens
165
-
166
- usage = CompletionUsage(
167
- prompt_tokens=prompt_tokens,
168
- completion_tokens=completion_tokens,
169
- total_tokens=total_tokens
170
- )
171
-
172
- # Create the completion object
173
- completion = ChatCompletion(
174
- id=request_id,
175
- choices=[choice],
176
- created=created_time,
177
- model=model,
178
- usage=usage,
179
- )
180
-
181
- return completion
182
-
183
- except Exception as e:
184
- print(f"Error during StandardInput non-stream request: {e}")
185
- raise IOError(f"StandardInput request failed: {e}") from e
186
-
187
- def _stream_request(
188
- self,
189
- request_id: str,
190
- created_time: int,
191
- model: str,
192
- payload: Dict[str, Any]
193
- ) -> Generator[ChatCompletionChunk, None, None]:
194
- """Handle streaming request."""
195
- try:
196
- # Make the request
197
- response = self._client.session.post(
198
- self._client.api_endpoint,
199
- cookies=self._client.cookies,
200
- json=payload,
201
- stream=True,
202
- timeout=self._client.timeout
203
- )
204
-
205
- # Check for errors
206
- if response.status_code != 200:
207
- # Try to get response content for better error messages
208
- try:
209
- error_content = response.text
210
- except:
211
- error_content = "<could not read response content>"
212
-
213
- if response.status_code in [403, 429]:
214
- print(f"Received status code {response.status_code}, refreshing identity...")
215
- self._client._refresh_identity()
216
- response = self._client.session.post(
217
- self._client.api_endpoint,
218
- cookies=self._client.cookies,
219
- json=payload,
220
- stream=True,
221
- timeout=self._client.timeout
222
- )
223
- if not response.ok:
224
- raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
225
- print("Identity refreshed successfully.")
226
- else:
227
- raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
228
-
229
- # Process the streaming response
230
- for line in response.iter_lines(decode_unicode=True):
231
- if line:
232
- try:
233
- # Extract content from the response
234
- match = re.search(r'0:"(.*?)"', line)
235
- if match:
236
- content = match.group(1)
237
-
238
- # Format the content to handle escape sequences
239
- content = self._client.format_text(content)
240
-
241
- # Create the delta object
242
- delta = ChoiceDelta(content=content)
243
-
244
- # Create the choice object
245
- choice = Choice(
246
- index=0,
247
- delta=delta,
248
- finish_reason=None
249
- )
250
-
251
- # Create the chunk object
252
- chunk = ChatCompletionChunk(
253
- id=request_id,
254
- choices=[choice],
255
- created=created_time,
256
- model=model
257
- )
258
-
259
- yield chunk
260
- except:
261
- pass
262
-
263
- # Send the final chunk with finish_reason
264
- final_choice = Choice(
265
- index=0,
266
- delta=ChoiceDelta(content=None),
267
- finish_reason="stop"
268
- )
269
-
270
- final_chunk = ChatCompletionChunk(
271
- id=request_id,
272
- choices=[final_choice],
273
- created=created_time,
274
- model=model
275
- )
276
-
277
- yield final_chunk
278
-
279
- except Exception as e:
280
- print(f"Error during StandardInput stream request: {e}")
281
- raise IOError(f"StandardInput request failed: {e}") from e
282
-
283
- class Chat(BaseChat):
284
- def __init__(self, client: 'StandardInput'):
285
- self.completions = Completions(client)
286
-
287
- class StandardInput(OpenAICompatibleProvider):
288
- """
289
- OpenAI-compatible client for StandardInput API.
290
-
291
- Usage:
292
- client = StandardInput()
293
- response = client.chat.completions.create(
294
- model="standard-quick",
295
- messages=[{"role": "user", "content": "Hello!"}]
296
- )
297
- print(response.choices[0].message.content)
298
- """
299
-
300
- AVAILABLE_MODELS = [
301
- "standard-quick",
302
- "standard-reasoning",
303
- ]
304
-
305
- # Map external model names to internal model IDs
306
- MODEL_MAPPING = {
307
- "standard-quick": "quick",
308
- "standard-reasoning": "quick", # Same model but with reasoning enabled
309
- }
310
-
311
- def __init__(
312
- self,
313
- timeout: int = 30,
314
- browser: str = "chrome"
315
- ):
316
- """
317
- Initialize the StandardInput client.
318
-
319
- Args:
320
- timeout: Request timeout in seconds.
321
- browser: Browser name for LitAgent to generate User-Agent.
322
- """
323
- self.timeout = timeout
324
- self.api_endpoint = "https://chat.standard-input.com/api/chat"
325
-
326
- # Initialize LitAgent for user agent generation
327
- self.agent = LitAgent()
328
- # Use fingerprinting to create a consistent browser identity
329
- self.fingerprint = self.agent.generate_fingerprint(browser)
330
-
331
- # Use the fingerprint for headers
332
- self.headers = {
333
- "accept": "*/*",
334
- "accept-encoding": "gzip, deflate, br, zstd",
335
- "accept-language": self.fingerprint["accept_language"],
336
- "content-type": "application/json",
337
- "dnt": "1",
338
- "origin": "https://chat.standard-input.com",
339
- "referer": "https://chat.standard-input.com/",
340
- "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
341
- "sec-ch-ua-mobile": "?0",
342
- "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
343
- "sec-fetch-dest": "empty",
344
- "sec-fetch-mode": "cors",
345
- "sec-fetch-site": "same-origin",
346
- "sec-gpc": "1",
347
- "user-agent": self.fingerprint["user_agent"],
348
- }
349
-
350
- # Default cookies - these should be updated for production use
351
- self.cookies = {
352
- "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
353
- "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
354
- "sidebar:state": "true",
355
- "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
356
- }
357
-
358
- # Initialize session with cloudscraper for better handling of Cloudflare protection
359
- self.session = cloudscraper.create_scraper()
360
- self.session.headers.update(self.headers)
361
-
362
- # Initialize chat interface
363
- self.chat = Chat(self)
364
-
365
- def _refresh_identity(self, browser: str = None):
366
- """
367
- Refreshes the browser identity fingerprint.
368
-
369
- Args:
370
- browser: Specific browser to use for the new fingerprint
371
- """
372
- browser = browser or self.fingerprint.get("browser_type", "chrome")
373
- self.fingerprint = self.agent.generate_fingerprint(browser)
374
-
375
- # Update headers with new fingerprint
376
- self.headers.update({
377
- "Accept-Language": self.fingerprint["accept_language"],
378
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
379
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
380
- "User-Agent": self.fingerprint["user_agent"],
381
- })
382
-
383
- # Update session headers
384
- for header, value in self.headers.items():
385
- self.session.headers[header] = value
386
-
387
- return self.fingerprint
388
-
389
- def format_text(self, text: str) -> str:
390
- """
391
- Format text by replacing escaped newlines with actual newlines.
392
-
393
- Args:
394
- text: Text to format
395
-
396
- Returns:
397
- Formatted text
398
- """
399
- # Use a more comprehensive approach to handle all escape sequences
400
- try:
401
- # First handle double backslashes to avoid issues
402
- text = text.replace('\\\\', '\\')
403
-
404
- # Handle common escape sequences
405
- text = text.replace('\\n', '\n')
406
- text = text.replace('\\r', '\r')
407
- text = text.replace('\\t', '\t')
408
- text = text.replace('\\"', '"')
409
- text = text.replace("\\'", "'")
410
-
411
- # Handle any remaining escape sequences using JSON decoding
412
- # This is a fallback in case there are other escape sequences
413
- try:
414
- # Add quotes to make it a valid JSON string
415
- json_str = f'"{text}"'
416
- # Use json module to decode all escape sequences
417
- decoded = json.loads(json_str)
418
- return decoded
419
- except json.JSONDecodeError:
420
- # If JSON decoding fails, return the text with the replacements we've already done
421
- return text
422
- except Exception as e:
423
- # If any error occurs, return the original text
424
- print(f"Warning: Error formatting text: {e}")
425
- return text
426
-
427
- @property
428
- def models(self):
429
- class _ModelList:
430
- def list(inner_self):
431
- return type(self).AVAILABLE_MODELS
432
- return _ModelList()
433
-
1
+ import json
2
+ import time
3
+ import uuid
4
+ import re
5
+ import cloudscraper
6
+ from datetime import datetime
7
+ from typing import List, Dict, Optional, Union, Generator, Any
8
+
9
+
10
+ # Import base classes and utility structures
11
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
12
+ from .utils import (
13
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
14
+ ChatCompletionMessage, CompletionUsage,
15
+ format_prompt, get_system_prompt, get_last_user_message, count_tokens
16
+ )
17
+
18
+ # Import LitAgent for browser fingerprinting
19
+ try:
20
+ from webscout.litagent import LitAgent
21
+ except ImportError:
22
+ # Define a dummy LitAgent if webscout is not installed or accessible
23
+ class LitAgent:
24
+ def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
25
+ # Return minimal default headers if LitAgent is unavailable
26
+ print("Warning: LitAgent not found. Using default minimal headers.")
27
+ return {
28
+ "accept": "*/*",
29
+ "accept_language": "en-US,en;q=0.9",
30
+ "platform": "Windows",
31
+ "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
32
+ "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
33
+ "browser_type": browser,
34
+ }
35
+
36
+ # --- StandardInput Client ---
37
+
38
+ class Completions(BaseCompletions):
39
+ def __init__(self, client: 'StandardInput'):
40
+ self._client = client
41
+
42
+ def create(
43
+ self,
44
+ *,
45
+ model: str,
46
+ messages: List[Dict[str, str]],
47
+ max_tokens: Optional[int] = None,
48
+ stream: bool = False,
49
+ temperature: Optional[float] = None,
50
+ top_p: Optional[float] = None,
51
+ **kwargs: Any
52
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
53
+ """Create a chat completion."""
54
+ # Validate model
55
+ if model not in self._client.AVAILABLE_MODELS and model not in self._client.MODEL_MAPPING.values():
56
+ raise ValueError(f"Model {model} not supported. Choose from: {list(self._client.AVAILABLE_MODELS)}")
57
+
58
+ # Map model name if needed
59
+ internal_model = self._client.MODEL_MAPPING.get(model, model)
60
+
61
+ # Extract reasoning flag from kwargs
62
+ enable_reasoning = kwargs.get("enable_reasoning", False)
63
+
64
+ # Prepare request
65
+ request_id = str(uuid.uuid4())
66
+ created_time = int(time.time())
67
+
68
+ # Extract system message and user message using utility functions
69
+ system_content = get_system_prompt(messages)
70
+ # Format the prompt for debugging purposes
71
+ formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
72
+ # Uncomment the line below for debugging
73
+ # print(f"Formatted prompt:\n{formatted_prompt}")
74
+
75
+ # Prepare the request payload
76
+ payload = {
77
+ "id": request_id,
78
+ "messages": [
79
+ {"role": "system", "content": system_content},
80
+ {"role": "user", "content": formatted_prompt, "parts": [{"type": "text", "text": formatted_prompt}]}
81
+ ],
82
+ "modelId": internal_model,
83
+ "enabledFeatures": ["reasoning"] if enable_reasoning or "reasoning" in internal_model else []
84
+ }
85
+
86
+ # Handle streaming vs non-streaming
87
+ if stream:
88
+ return self._stream_request(request_id, created_time, model, payload)
89
+ else:
90
+ return self._non_stream_request(request_id, created_time, model, payload)
91
+
92
+ def _non_stream_request(
93
+ self,
94
+ request_id: str,
95
+ created_time: int,
96
+ model: str,
97
+ payload: Dict[str, Any]
98
+ ) -> ChatCompletion:
99
+ """Handle non-streaming request."""
100
+ try:
101
+ # Make the request
102
+ response = self._client.session.post(
103
+ self._client.api_endpoint,
104
+ cookies=self._client.cookies,
105
+ json=payload,
106
+ timeout=self._client.timeout
107
+ )
108
+
109
+ # Check for errors
110
+ if response.status_code != 200:
111
+ # Try to get response content for better error messages
112
+ try:
113
+ error_content = response.text
114
+ except:
115
+ error_content = "<could not read response content>"
116
+
117
+ if response.status_code in [403, 429]:
118
+ print(f"Received status code {response.status_code}, refreshing identity...")
119
+ self._client._refresh_identity()
120
+ response = self._client.session.post(
121
+ self._client.api_endpoint,
122
+ cookies=self._client.cookies,
123
+ json=payload,
124
+ timeout=self._client.timeout
125
+ )
126
+ if not response.ok:
127
+ raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
128
+ print("Identity refreshed successfully.")
129
+ else:
130
+ raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
131
+
132
+ # Process the response
133
+ full_response = ""
134
+
135
+ # Process the streaming response to get the full text
136
+ for line in response.iter_lines(decode_unicode=True):
137
+ if line:
138
+ try:
139
+ # Extract content from the response
140
+ match = re.search(r'0:"(.*?)"', line)
141
+ if match:
142
+ content = match.group(1)
143
+ # Format the content to handle escape sequences
144
+ content = self._client.format_text(content)
145
+ full_response += content
146
+ except:
147
+ pass
148
+
149
+ # Create the response objects
150
+ message = ChatCompletionMessage(
151
+ role="assistant",
152
+ content=full_response
153
+ )
154
+
155
+ choice = Choice(
156
+ index=0,
157
+ message=message,
158
+ finish_reason="stop"
159
+ )
160
+
161
+ # Estimate token usage (very rough estimate)
162
+ prompt_tokens = count_tokens(str(payload))
163
+ completion_tokens = count_tokens(full_response)
164
+ total_tokens = prompt_tokens + completion_tokens
165
+
166
+ usage = CompletionUsage(
167
+ prompt_tokens=prompt_tokens,
168
+ completion_tokens=completion_tokens,
169
+ total_tokens=total_tokens
170
+ )
171
+
172
+ # Create the completion object
173
+ completion = ChatCompletion(
174
+ id=request_id,
175
+ choices=[choice],
176
+ created=created_time,
177
+ model=model,
178
+ usage=usage,
179
+ )
180
+
181
+ return completion
182
+
183
+ except Exception as e:
184
+ print(f"Error during StandardInput non-stream request: {e}")
185
+ raise IOError(f"StandardInput request failed: {e}") from e
186
+
187
+ def _stream_request(
188
+ self,
189
+ request_id: str,
190
+ created_time: int,
191
+ model: str,
192
+ payload: Dict[str, Any]
193
+ ) -> Generator[ChatCompletionChunk, None, None]:
194
+ """Handle streaming request."""
195
+ try:
196
+ # Make the request
197
+ response = self._client.session.post(
198
+ self._client.api_endpoint,
199
+ cookies=self._client.cookies,
200
+ json=payload,
201
+ stream=True,
202
+ timeout=self._client.timeout
203
+ )
204
+
205
+ # Check for errors
206
+ if response.status_code != 200:
207
+ # Try to get response content for better error messages
208
+ try:
209
+ error_content = response.text
210
+ except:
211
+ error_content = "<could not read response content>"
212
+
213
+ if response.status_code in [403, 429]:
214
+ print(f"Received status code {response.status_code}, refreshing identity...")
215
+ self._client._refresh_identity()
216
+ response = self._client.session.post(
217
+ self._client.api_endpoint,
218
+ cookies=self._client.cookies,
219
+ json=payload,
220
+ stream=True,
221
+ timeout=self._client.timeout
222
+ )
223
+ if not response.ok:
224
+ raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
225
+ print("Identity refreshed successfully.")
226
+ else:
227
+ raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
228
+
229
+ # Process the streaming response
230
+ for line in response.iter_lines(decode_unicode=True):
231
+ if line:
232
+ try:
233
+ # Extract content from the response
234
+ match = re.search(r'0:"(.*?)"', line)
235
+ if match:
236
+ content = match.group(1)
237
+
238
+ # Format the content to handle escape sequences
239
+ content = self._client.format_text(content)
240
+
241
+ # Create the delta object
242
+ delta = ChoiceDelta(content=content)
243
+
244
+ # Create the choice object
245
+ choice = Choice(
246
+ index=0,
247
+ delta=delta,
248
+ finish_reason=None
249
+ )
250
+
251
+ # Create the chunk object
252
+ chunk = ChatCompletionChunk(
253
+ id=request_id,
254
+ choices=[choice],
255
+ created=created_time,
256
+ model=model
257
+ )
258
+
259
+ yield chunk
260
+ except:
261
+ pass
262
+
263
+ # Send the final chunk with finish_reason
264
+ final_choice = Choice(
265
+ index=0,
266
+ delta=ChoiceDelta(content=None),
267
+ finish_reason="stop"
268
+ )
269
+
270
+ final_chunk = ChatCompletionChunk(
271
+ id=request_id,
272
+ choices=[final_choice],
273
+ created=created_time,
274
+ model=model
275
+ )
276
+
277
+ yield final_chunk
278
+
279
+ except Exception as e:
280
+ print(f"Error during StandardInput stream request: {e}")
281
+ raise IOError(f"StandardInput request failed: {e}") from e
282
+
283
class Chat(BaseChat):
    """Namespace object exposing `client.chat.completions` in the OpenAI style."""

    def __init__(self, client: 'StandardInput'):
        # Wire the completions endpoint to the owning client instance.
        self.completions = Completions(client)
287
class StandardInput(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for StandardInput API.

    Usage:
        client = StandardInput()
        response = client.chat.completions.create(
            model="standard-quick",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Public model names accepted by chat.completions.create().
    AVAILABLE_MODELS = [
        "standard-quick",
        "standard-reasoning",
    ]

    # Map external model names to internal model IDs
    MODEL_MAPPING = {
        "standard-quick": "quick",
        "standard-reasoning": "quick",  # Same model but with reasoning enabled
    }
311
    def __init__(
        self,
        timeout: int = 30,
        browser: str = "chrome"
    ):
        """
        Initialize the StandardInput client.

        Args:
            timeout: Request timeout in seconds.
            browser: Browser name for LitAgent to generate User-Agent.
        """
        self.timeout = timeout
        self.api_endpoint = "https://chat.standard-input.com/api/chat"

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        # Use fingerprinting to create a consistent browser identity
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers. Keys are lowercase; other methods
        # that update this dict should use the same spelling.
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.standard-input.com",
            "referer": "https://chat.standard-input.com/",
            # Fall back to a static Edge UA-brand string when the fingerprint
            # does not supply one.
            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": self.fingerprint["user_agent"],
        }

        # Default cookies - these should be updated for production use
        # NOTE(review): these are hard-coded auth tokens (JWTs with embedded
        # expiry) baked into the package; they will expire and are a security
        # smell — confirm they are intentionally throwaway demo accounts.
        self.cookies = {
            "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
            "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
            "sidebar:state": "true",
            "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
        }

        # Initialize session with cloudscraper for better handling of Cloudflare protection
        self.session = cloudscraper.create_scraper()
        self.session.headers.update(self.headers)

        # Initialize chat interface
        self.chat = Chat(self)
365
+ def _refresh_identity(self, browser: str = None):
366
+ """
367
+ Refreshes the browser identity fingerprint.
368
+
369
+ Args:
370
+ browser: Specific browser to use for the new fingerprint
371
+ """
372
+ browser = browser or self.fingerprint.get("browser_type", "chrome")
373
+ self.fingerprint = self.agent.generate_fingerprint(browser)
374
+
375
+ # Update headers with new fingerprint
376
+ self.headers.update({
377
+ "Accept-Language": self.fingerprint["accept_language"],
378
+ "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
379
+ "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
380
+ "User-Agent": self.fingerprint["user_agent"],
381
+ })
382
+
383
+ # Update session headers
384
+ for header, value in self.headers.items():
385
+ self.session.headers[header] = value
386
+
387
+ return self.fingerprint
388
+
389
+ def format_text(self, text: str) -> str:
390
+ """
391
+ Format text by replacing escaped newlines with actual newlines.
392
+
393
+ Args:
394
+ text: Text to format
395
+
396
+ Returns:
397
+ Formatted text
398
+ """
399
+ # Use a more comprehensive approach to handle all escape sequences
400
+ try:
401
+ # First handle double backslashes to avoid issues
402
+ text = text.replace('\\\\', '\\')
403
+
404
+ # Handle common escape sequences
405
+ text = text.replace('\\n', '\n')
406
+ text = text.replace('\\r', '\r')
407
+ text = text.replace('\\t', '\t')
408
+ text = text.replace('\\"', '"')
409
+ text = text.replace("\\'", "'")
410
+
411
+ # Handle any remaining escape sequences using JSON decoding
412
+ # This is a fallback in case there are other escape sequences
413
+ try:
414
+ # Add quotes to make it a valid JSON string
415
+ json_str = f'"{text}"'
416
+ # Use json module to decode all escape sequences
417
+ decoded = json.loads(json_str)
418
+ return decoded
419
+ except json.JSONDecodeError:
420
+ # If JSON decoding fails, return the text with the replacements we've already done
421
+ return text
422
+ except Exception as e:
423
+ # If any error occurs, return the original text
424
+ print(f"Warning: Error formatting text: {e}")
425
+ return text
426
+
427
+ @property
428
+ def models(self):
429
+ class _ModelList:
430
+ def list(inner_self):
431
+ return type(self).AVAILABLE_MODELS
432
+ return _ModelList()
433
+