webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (197) hide show
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,433 +1,442 @@
1
- import json
2
- import time
3
- import uuid
4
- import re
5
- import cloudscraper
6
- from datetime import datetime
7
- from typing import List, Dict, Optional, Union, Generator, Any
8
-
9
-
10
- # Import base classes and utility structures
11
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
12
- from .utils import (
13
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
14
- ChatCompletionMessage, CompletionUsage,
15
- format_prompt, get_system_prompt, get_last_user_message
16
- )
17
-
18
- # Import LitAgent for browser fingerprinting
19
- try:
20
- from webscout.litagent import LitAgent
21
- except ImportError:
22
- # Define a dummy LitAgent if webscout is not installed or accessible
23
- class LitAgent:
24
- def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
25
- # Return minimal default headers if LitAgent is unavailable
26
- print("Warning: LitAgent not found. Using default minimal headers.")
27
- return {
28
- "accept": "*/*",
29
- "accept_language": "en-US,en;q=0.9",
30
- "platform": "Windows",
31
- "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
32
- "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
33
- "browser_type": browser,
34
- }
35
-
36
- # --- StandardInput Client ---
37
-
38
- class Completions(BaseCompletions):
39
- def __init__(self, client: 'StandardInput'):
40
- self._client = client
41
-
42
- def create(
43
- self,
44
- *,
45
- model: str,
46
- messages: List[Dict[str, str]],
47
- max_tokens: Optional[int] = None,
48
- stream: bool = False,
49
- temperature: Optional[float] = None,
50
- top_p: Optional[float] = None,
51
- **kwargs: Any
52
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
53
- """Create a chat completion."""
54
- # Validate model
55
- if model not in self._client.AVAILABLE_MODELS and model not in self._client.MODEL_MAPPING.values():
56
- raise ValueError(f"Model {model} not supported. Choose from: {list(self._client.AVAILABLE_MODELS)}")
57
-
58
- # Map model name if needed
59
- internal_model = self._client.MODEL_MAPPING.get(model, model)
60
-
61
- # Extract reasoning flag from kwargs
62
- enable_reasoning = kwargs.get("enable_reasoning", False)
63
-
64
- # Prepare request
65
- request_id = str(uuid.uuid4())
66
- created_time = int(time.time())
67
-
68
- # Extract system message and user message using utility functions
69
- system_content = get_system_prompt(messages)
70
- # Format the prompt for debugging purposes
71
- formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
72
- # Uncomment the line below for debugging
73
- # print(f"Formatted prompt:\n{formatted_prompt}")
74
-
75
- # Prepare the request payload
76
- payload = {
77
- "id": request_id,
78
- "messages": [
79
- {"role": "system", "content": system_content},
80
- {"role": "user", "content": formatted_prompt, "parts": [{"type": "text", "text": formatted_prompt}]}
81
- ],
82
- "modelId": internal_model,
83
- "enabledFeatures": ["reasoning"] if enable_reasoning or "reasoning" in internal_model else []
84
- }
85
-
86
- # Handle streaming vs non-streaming
87
- if stream:
88
- return self._stream_request(request_id, created_time, model, payload)
89
- else:
90
- return self._non_stream_request(request_id, created_time, model, payload)
91
-
92
- def _non_stream_request(
93
- self,
94
- request_id: str,
95
- created_time: int,
96
- model: str,
97
- payload: Dict[str, Any]
98
- ) -> ChatCompletion:
99
- """Handle non-streaming request."""
100
- try:
101
- # Make the request
102
- response = self._client.session.post(
103
- self._client.api_endpoint,
104
- cookies=self._client.cookies,
105
- json=payload,
106
- timeout=self._client.timeout
107
- )
108
-
109
- # Check for errors
110
- if response.status_code != 200:
111
- # Try to get response content for better error messages
112
- try:
113
- error_content = response.text
114
- except:
115
- error_content = "<could not read response content>"
116
-
117
- if response.status_code in [403, 429]:
118
- print(f"Received status code {response.status_code}, refreshing identity...")
119
- self._client._refresh_identity()
120
- response = self._client.session.post(
121
- self._client.api_endpoint,
122
- cookies=self._client.cookies,
123
- json=payload,
124
- timeout=self._client.timeout
125
- )
126
- if not response.ok:
127
- raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
128
- print("Identity refreshed successfully.")
129
- else:
130
- raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
131
-
132
- # Process the response
133
- full_response = ""
134
-
135
- # Process the streaming response to get the full text
136
- for line in response.iter_lines(decode_unicode=True):
137
- if line:
138
- try:
139
- # Extract content from the response
140
- match = re.search(r'0:"(.*?)"', line)
141
- if match:
142
- content = match.group(1)
143
- # Format the content to handle escape sequences
144
- content = self._client.format_text(content)
145
- full_response += content
146
- except:
147
- pass
148
-
149
- # Create the response objects
150
- message = ChatCompletionMessage(
151
- role="assistant",
152
- content=full_response
153
- )
154
-
155
- choice = Choice(
156
- index=0,
157
- message=message,
158
- finish_reason="stop"
159
- )
160
-
161
- # Estimate token usage (very rough estimate)
162
- prompt_tokens = len(str(payload).split()) * 2
163
- completion_tokens = len(full_response.split()) * 2
164
- total_tokens = prompt_tokens + completion_tokens
165
-
166
- usage = CompletionUsage(
167
- prompt_tokens=prompt_tokens,
168
- completion_tokens=completion_tokens,
169
- total_tokens=total_tokens
170
- )
171
-
172
- # Create the completion object
173
- completion = ChatCompletion(
174
- id=request_id,
175
- choices=[choice],
176
- created=created_time,
177
- model=model,
178
- usage=usage,
179
- )
180
-
181
- return completion
182
-
183
- except Exception as e:
184
- print(f"Error during StandardInput non-stream request: {e}")
185
- raise IOError(f"StandardInput request failed: {e}") from e
186
-
187
- def _stream_request(
188
- self,
189
- request_id: str,
190
- created_time: int,
191
- model: str,
192
- payload: Dict[str, Any]
193
- ) -> Generator[ChatCompletionChunk, None, None]:
194
- """Handle streaming request."""
195
- try:
196
- # Make the request
197
- response = self._client.session.post(
198
- self._client.api_endpoint,
199
- cookies=self._client.cookies,
200
- json=payload,
201
- stream=True,
202
- timeout=self._client.timeout
203
- )
204
-
205
- # Check for errors
206
- if response.status_code != 200:
207
- # Try to get response content for better error messages
208
- try:
209
- error_content = response.text
210
- except:
211
- error_content = "<could not read response content>"
212
-
213
- if response.status_code in [403, 429]:
214
- print(f"Received status code {response.status_code}, refreshing identity...")
215
- self._client._refresh_identity()
216
- response = self._client.session.post(
217
- self._client.api_endpoint,
218
- cookies=self._client.cookies,
219
- json=payload,
220
- stream=True,
221
- timeout=self._client.timeout
222
- )
223
- if not response.ok:
224
- raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
225
- print("Identity refreshed successfully.")
226
- else:
227
- raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
228
-
229
- # Process the streaming response
230
- for line in response.iter_lines(decode_unicode=True):
231
- if line:
232
- try:
233
- # Extract content from the response
234
- match = re.search(r'0:"(.*?)"', line)
235
- if match:
236
- content = match.group(1)
237
-
238
- # Format the content to handle escape sequences
239
- content = self._client.format_text(content)
240
-
241
- # Create the delta object
242
- delta = ChoiceDelta(content=content)
243
-
244
- # Create the choice object
245
- choice = Choice(
246
- index=0,
247
- delta=delta,
248
- finish_reason=None
249
- )
250
-
251
- # Create the chunk object
252
- chunk = ChatCompletionChunk(
253
- id=request_id,
254
- choices=[choice],
255
- created=created_time,
256
- model=model
257
- )
258
-
259
- yield chunk
260
- except:
261
- pass
262
-
263
- # Send the final chunk with finish_reason
264
- final_choice = Choice(
265
- index=0,
266
- delta=ChoiceDelta(content=None),
267
- finish_reason="stop"
268
- )
269
-
270
- final_chunk = ChatCompletionChunk(
271
- id=request_id,
272
- choices=[final_choice],
273
- created=created_time,
274
- model=model
275
- )
276
-
277
- yield final_chunk
278
-
279
- except Exception as e:
280
- print(f"Error during StandardInput stream request: {e}")
281
- raise IOError(f"StandardInput request failed: {e}") from e
282
-
283
- class Chat(BaseChat):
284
- def __init__(self, client: 'StandardInput'):
285
- self.completions = Completions(client)
286
-
287
- class StandardInput(OpenAICompatibleProvider):
288
- """
289
- OpenAI-compatible client for StandardInput API.
290
-
291
- Usage:
292
- client = StandardInput()
293
- response = client.chat.completions.create(
294
- model="standard-quick",
295
- messages=[{"role": "user", "content": "Hello!"}]
296
- )
297
- print(response.choices[0].message.content)
298
- """
299
-
300
- AVAILABLE_MODELS = [
301
- "standard-quick",
302
- "standard-reasoning",
303
- ]
304
-
305
- # Map external model names to internal model IDs
306
- MODEL_MAPPING = {
307
- "standard-quick": "quick",
308
- "standard-reasoning": "quick", # Same model but with reasoning enabled
309
- }
310
-
311
- def __init__(
312
- self,
313
- timeout: int = 30,
314
- browser: str = "chrome"
315
- ):
316
- """
317
- Initialize the StandardInput client.
318
-
319
- Args:
320
- timeout: Request timeout in seconds.
321
- browser: Browser name for LitAgent to generate User-Agent.
322
- """
323
- self.timeout = timeout
324
- self.api_endpoint = "https://chat.standard-input.com/api/chat"
325
-
326
- # Initialize LitAgent for user agent generation
327
- self.agent = LitAgent()
328
- # Use fingerprinting to create a consistent browser identity
329
- self.fingerprint = self.agent.generate_fingerprint(browser)
330
-
331
- # Use the fingerprint for headers
332
- self.headers = {
333
- "accept": "*/*",
334
- "accept-encoding": "gzip, deflate, br, zstd",
335
- "accept-language": self.fingerprint["accept_language"],
336
- "content-type": "application/json",
337
- "dnt": "1",
338
- "origin": "https://chat.standard-input.com",
339
- "referer": "https://chat.standard-input.com/",
340
- "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
341
- "sec-ch-ua-mobile": "?0",
342
- "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
343
- "sec-fetch-dest": "empty",
344
- "sec-fetch-mode": "cors",
345
- "sec-fetch-site": "same-origin",
346
- "sec-gpc": "1",
347
- "user-agent": self.fingerprint["user_agent"],
348
- }
349
-
350
- # Default cookies - these should be updated for production use
351
- self.cookies = {
352
- "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
353
- "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
354
- "sidebar:state": "true",
355
- "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
356
- }
357
-
358
- # Initialize session with cloudscraper for better handling of Cloudflare protection
359
- self.session = cloudscraper.create_scraper()
360
- self.session.headers.update(self.headers)
361
-
362
- # Initialize chat interface
363
- self.chat = Chat(self)
364
-
365
- def _refresh_identity(self, browser: str = None):
366
- """
367
- Refreshes the browser identity fingerprint.
368
-
369
- Args:
370
- browser: Specific browser to use for the new fingerprint
371
- """
372
- browser = browser or self.fingerprint.get("browser_type", "chrome")
373
- self.fingerprint = self.agent.generate_fingerprint(browser)
374
-
375
- # Update headers with new fingerprint
376
- self.headers.update({
377
- "Accept-Language": self.fingerprint["accept_language"],
378
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
379
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
380
- "User-Agent": self.fingerprint["user_agent"],
381
- })
382
-
383
- # Update session headers
384
- for header, value in self.headers.items():
385
- self.session.headers[header] = value
386
-
387
- return self.fingerprint
388
-
389
- def format_text(self, text: str) -> str:
390
- """
391
- Format text by replacing escaped newlines with actual newlines.
392
-
393
- Args:
394
- text: Text to format
395
-
396
- Returns:
397
- Formatted text
398
- """
399
- # Use a more comprehensive approach to handle all escape sequences
400
- try:
401
- # First handle double backslashes to avoid issues
402
- text = text.replace('\\\\', '\\')
403
-
404
- # Handle common escape sequences
405
- text = text.replace('\\n', '\n')
406
- text = text.replace('\\r', '\r')
407
- text = text.replace('\\t', '\t')
408
- text = text.replace('\\"', '"')
409
- text = text.replace("\\'", "'")
410
-
411
- # Handle any remaining escape sequences using JSON decoding
412
- # This is a fallback in case there are other escape sequences
413
- try:
414
- # Add quotes to make it a valid JSON string
415
- json_str = f'"{text}"'
416
- # Use json module to decode all escape sequences
417
- decoded = json.loads(json_str)
418
- return decoded
419
- except json.JSONDecodeError:
420
- # If JSON decoding fails, return the text with the replacements we've already done
421
- return text
422
- except Exception as e:
423
- # If any error occurs, return the original text
424
- print(f"Warning: Error formatting text: {e}")
425
- return text
426
-
427
- @property
428
- def models(self):
429
- class _ModelList:
430
- def list(inner_self):
431
- return type(self).AVAILABLE_MODELS
432
- return _ModelList()
433
-
1
+ import json
2
+ import time
3
+ import uuid
4
+ import re
5
+ import cloudscraper
6
+ from datetime import datetime
7
+ from typing import List, Dict, Optional, Union, Generator, Any
8
+
9
+
10
+ # Import base classes and utility structures
11
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
12
+ from .utils import (
13
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
14
+ ChatCompletionMessage, CompletionUsage,
15
+ format_prompt, get_system_prompt, get_last_user_message, count_tokens
16
+ )
17
+
18
+ # Import LitAgent for browser fingerprinting
19
+ try:
20
+ from webscout.litagent import LitAgent
21
+ except ImportError:
22
+ # Define a dummy LitAgent if webscout is not installed or accessible
23
+ class LitAgent:
24
+ def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
25
+ # Return minimal default headers if LitAgent is unavailable
26
+ print("Warning: LitAgent not found. Using default minimal headers.")
27
+ return {
28
+ "accept": "*/*",
29
+ "accept_language": "en-US,en;q=0.9",
30
+ "platform": "Windows",
31
+ "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
32
+ "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
33
+ "browser_type": browser,
34
+ }
35
+
36
+ # --- StandardInput Client ---
37
+
38
+ class Completions(BaseCompletions):
39
+ def __init__(self, client: 'StandardInput'):
40
+ self._client = client
41
+
42
+ def create(
43
+ self,
44
+ *,
45
+ model: str,
46
+ messages: List[Dict[str, str]],
47
+ max_tokens: Optional[int] = None,
48
+ stream: bool = False,
49
+ temperature: Optional[float] = None,
50
+ top_p: Optional[float] = None,
51
+ timeout: Optional[int] = None,
52
+ proxies: Optional[Dict[str, str]] = None,
53
+ **kwargs: Any
54
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
55
+ """Create a chat completion."""
56
+ # Validate model
57
+ if model not in self._client.AVAILABLE_MODELS and model not in self._client.MODEL_MAPPING.values():
58
+ raise ValueError(f"Model {model} not supported. Choose from: {list(self._client.AVAILABLE_MODELS)}")
59
+
60
+ # Map model name if needed
61
+ internal_model = self._client.MODEL_MAPPING.get(model, model)
62
+
63
+ # Extract reasoning flag from kwargs
64
+ enable_reasoning = kwargs.get("enable_reasoning", False)
65
+
66
+ # Prepare request
67
+ request_id = str(uuid.uuid4())
68
+ created_time = int(time.time())
69
+
70
+ # Extract system message and user message using utility functions
71
+ system_content = get_system_prompt(messages)
72
+ # Format the prompt for debugging purposes
73
+ formatted_prompt = format_prompt(messages, add_special_tokens=True, do_continue=True)
74
+ # Uncomment the line below for debugging
75
+ # print(f"Formatted prompt:\n{formatted_prompt}")
76
+
77
+ # Prepare the request payload
78
+ payload = {
79
+ "id": request_id,
80
+ "messages": [
81
+ {"role": "system", "content": system_content},
82
+ {"role": "user", "content": formatted_prompt, "parts": [{"type": "text", "text": formatted_prompt}]}
83
+ ],
84
+ "modelId": internal_model,
85
+ "enabledFeatures": ["reasoning"] if enable_reasoning or "reasoning" in internal_model else []
86
+ }
87
+
88
+ # Handle streaming vs non-streaming
89
+ if stream:
90
+ return self._stream_request(request_id, created_time, model, payload, timeout, proxies)
91
+ else:
92
+ return self._non_stream_request(request_id, created_time, model, payload, timeout, proxies)
93
+
94
+ def _non_stream_request(
95
+ self,
96
+ request_id: str,
97
+ created_time: int,
98
+ model: str,
99
+ payload: Dict[str, Any],
100
+ timeout: Optional[int] = None,
101
+ proxies: Optional[Dict[str, str]] = None
102
+ ) -> ChatCompletion:
103
+ """Handle non-streaming request."""
104
+ try:
105
+ # Make the request
106
+ response = self._client.session.post(
107
+ self._client.api_endpoint,
108
+ cookies=self._client.cookies,
109
+ json=payload,
110
+ timeout=timeout or self._client.timeout,
111
+ proxies=proxies or getattr(self._client, "proxies", None)
112
+ )
113
+
114
+ # Check for errors
115
+ if response.status_code != 200:
116
+ # Try to get response content for better error messages
117
+ try:
118
+ error_content = response.text
119
+ except:
120
+ error_content = "<could not read response content>"
121
+
122
+ if response.status_code in [403, 429]:
123
+ print(f"Received status code {response.status_code}, refreshing identity...")
124
+ self._client._refresh_identity()
125
+ response = self._client.session.post(
126
+ self._client.api_endpoint,
127
+ cookies=self._client.cookies,
128
+ json=payload,
129
+ timeout=timeout or self._client.timeout,
130
+ proxies=proxies or getattr(self._client, "proxies", None)
131
+ )
132
+ if not response.ok:
133
+ raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
134
+ print("Identity refreshed successfully.")
135
+ else:
136
+ raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
137
+
138
+ # Process the response
139
+ full_response = ""
140
+
141
+ # Process the streaming response to get the full text
142
+ for line in response.iter_lines(decode_unicode=True):
143
+ if line:
144
+ try:
145
+ # Extract content from the response
146
+ match = re.search(r'0:"(.*?)"', line)
147
+ if match:
148
+ content = match.group(1)
149
+ # Format the content to handle escape sequences
150
+ content = self._client.format_text(content)
151
+ full_response += content
152
+ except:
153
+ pass
154
+
155
+ # Create the response objects
156
+ message = ChatCompletionMessage(
157
+ role="assistant",
158
+ content=full_response
159
+ )
160
+
161
+ choice = Choice(
162
+ index=0,
163
+ message=message,
164
+ finish_reason="stop"
165
+ )
166
+
167
+ # Estimate token usage (very rough estimate)
168
+ prompt_tokens = count_tokens(str(payload))
169
+ completion_tokens = count_tokens(full_response)
170
+ total_tokens = prompt_tokens + completion_tokens
171
+
172
+ usage = CompletionUsage(
173
+ prompt_tokens=prompt_tokens,
174
+ completion_tokens=completion_tokens,
175
+ total_tokens=total_tokens
176
+ )
177
+
178
+ # Create the completion object
179
+ completion = ChatCompletion(
180
+ id=request_id,
181
+ choices=[choice],
182
+ created=created_time,
183
+ model=model,
184
+ usage=usage,
185
+ )
186
+
187
+ return completion
188
+
189
+ except Exception as e:
190
+ print(f"Error during StandardInput non-stream request: {e}")
191
+ raise IOError(f"StandardInput request failed: {e}") from e
192
+
193
+ def _stream_request(
194
+ self,
195
+ request_id: str,
196
+ created_time: int,
197
+ model: str,
198
+ payload: Dict[str, Any],
199
+ timeout: Optional[int] = None,
200
+ proxies: Optional[Dict[str, str]] = None
201
+ ) -> Generator[ChatCompletionChunk, None, None]:
202
+ """Handle streaming request."""
203
+ try:
204
+ # Make the request
205
+ response = self._client.session.post(
206
+ self._client.api_endpoint,
207
+ cookies=self._client.cookies,
208
+ json=payload,
209
+ stream=True,
210
+ timeout=timeout or self._client.timeout,
211
+ proxies=proxies or getattr(self._client, "proxies", None)
212
+ )
213
+
214
+ # Check for errors
215
+ if response.status_code != 200:
216
+ # Try to get response content for better error messages
217
+ try:
218
+ error_content = response.text
219
+ except:
220
+ error_content = "<could not read response content>"
221
+
222
+ if response.status_code in [403, 429]:
223
+ print(f"Received status code {response.status_code}, refreshing identity...")
224
+ self._client._refresh_identity()
225
+ response = self._client.session.post(
226
+ self._client.api_endpoint,
227
+ cookies=self._client.cookies,
228
+ json=payload,
229
+ stream=True,
230
+ timeout=timeout or self._client.timeout,
231
+ proxies=proxies or getattr(self._client, "proxies", None)
232
+ )
233
+ if not response.ok:
234
+ raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
235
+ print("Identity refreshed successfully.")
236
+ else:
237
+ raise IOError(f"Request failed with status code {response.status_code}. Response: {error_content}")
238
+
239
+ # Process the streaming response
240
+ for line in response.iter_lines(decode_unicode=True):
241
+ if line:
242
+ try:
243
+ # Extract content from the response
244
+ match = re.search(r'0:"(.*?)"', line)
245
+ if match:
246
+ content = match.group(1)
247
+
248
+ # Format the content to handle escape sequences
249
+ content = self._client.format_text(content)
250
+
251
+ # Create the delta object
252
+ delta = ChoiceDelta(content=content)
253
+
254
+ # Create the choice object
255
+ choice = Choice(
256
+ index=0,
257
+ delta=delta,
258
+ finish_reason=None
259
+ )
260
+
261
+ # Create the chunk object
262
+ chunk = ChatCompletionChunk(
263
+ id=request_id,
264
+ choices=[choice],
265
+ created=created_time,
266
+ model=model
267
+ )
268
+
269
+ yield chunk
270
+ except:
271
+ pass
272
+
273
+ # Send the final chunk with finish_reason
274
+ final_choice = Choice(
275
+ index=0,
276
+ delta=ChoiceDelta(content=None),
277
+ finish_reason="stop"
278
+ )
279
+
280
+ final_chunk = ChatCompletionChunk(
281
+ id=request_id,
282
+ choices=[final_choice],
283
+ created=created_time,
284
+ model=model
285
+ )
286
+
287
+ yield final_chunk
288
+
289
+ except Exception as e:
290
+ print(f"Error during StandardInput stream request: {e}")
291
+ raise IOError(f"StandardInput request failed: {e}") from e
292
+
293
class Chat(BaseChat):
    """Namespace mirroring the OpenAI client layout (``client.chat.completions``)."""

    def __init__(self, client: 'StandardInput'):
        # Expose the completions interface bound to the owning client.
        self.completions = Completions(client)
296
+
297
class StandardInput(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for StandardInput API.

    Usage:
        client = StandardInput()
        response = client.chat.completions.create(
            model="standard-quick",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Public model names accepted by chat.completions.create().
    AVAILABLE_MODELS = [
        "standard-quick",
        "standard-reasoning",
    ]

    # Map external model names to internal model IDs
    # (both names resolve to the backend's "quick" model; the reasoning
    # variant is selected via the "enabledFeatures" payload flag instead).
    MODEL_MAPPING = {
        "standard-quick": "quick",
        "standard-reasoning": "quick",  # Same model but with reasoning enabled
    }
320
+
321
    def __init__(
        self,
        timeout: int = 30,
        browser: str = "chrome"
    ):
        """
        Initialize the StandardInput client.

        Builds a fingerprinted header set, installs hard-coded session
        cookies, and creates a cloudscraper session plus the ``chat``
        accessor.

        Args:
            timeout: Request timeout in seconds.
            browser: Browser name for LitAgent to generate User-Agent.
        """
        self.timeout = timeout
        self.api_endpoint = "https://chat.standard-input.com/api/chat"

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        # Use fingerprinting to create a consistent browser identity
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers (lower-case keys throughout).
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.standard-input.com",
            "referer": "https://chat.standard-input.com/",
            # Fallback UA-brand string in case the fingerprint omits one.
            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": self.fingerprint["user_agent"],
        }

        # Default cookies - these should be updated for production use
        # NOTE(review): hard-coded auth tokens baked into the package; they
        # carry expiry timestamps and will stop working. Callers should be
        # able to supply their own credentials.
        self.cookies = {
            "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
            "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
            "sidebar:state": "true",
            "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
        }

        # Initialize session with cloudscraper for better handling of Cloudflare protection
        self.session = cloudscraper.create_scraper()
        self.session.headers.update(self.headers)

        # Initialize chat interface
        self.chat = Chat(self)
374
+
375
+ def _refresh_identity(self, browser: str = None):
376
+ """
377
+ Refreshes the browser identity fingerprint.
378
+
379
+ Args:
380
+ browser: Specific browser to use for the new fingerprint
381
+ """
382
+ browser = browser or self.fingerprint.get("browser_type", "chrome")
383
+ self.fingerprint = self.agent.generate_fingerprint(browser)
384
+
385
+ # Update headers with new fingerprint
386
+ self.headers.update({
387
+ "Accept-Language": self.fingerprint["accept_language"],
388
+ "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
389
+ "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
390
+ "User-Agent": self.fingerprint["user_agent"],
391
+ })
392
+
393
+ # Update session headers
394
+ for header, value in self.headers.items():
395
+ self.session.headers[header] = value
396
+
397
+ return self.fingerprint
398
+
399
+ def format_text(self, text: str) -> str:
400
+ """
401
+ Format text by replacing escaped newlines with actual newlines.
402
+
403
+ Args:
404
+ text: Text to format
405
+
406
+ Returns:
407
+ Formatted text
408
+ """
409
+ # Use a more comprehensive approach to handle all escape sequences
410
+ try:
411
+ # First handle double backslashes to avoid issues
412
+ text = text.replace('\\\\', '\\')
413
+
414
+ # Handle common escape sequences
415
+ text = text.replace('\\n', '\n')
416
+ text = text.replace('\\r', '\r')
417
+ text = text.replace('\\t', '\t')
418
+ text = text.replace('\\"', '"')
419
+ text = text.replace("\\'", "'")
420
+
421
+ # Handle any remaining escape sequences using JSON decoding
422
+ # This is a fallback in case there are other escape sequences
423
+ try:
424
+ # Add quotes to make it a valid JSON string
425
+ json_str = f'"{text}"'
426
+ # Use json module to decode all escape sequences
427
+ decoded = json.loads(json_str)
428
+ return decoded
429
+ except json.JSONDecodeError:
430
+ # If JSON decoding fails, return the text with the replacements we've already done
431
+ return text
432
+ except Exception as e:
433
+ # If any error occurs, return the original text
434
+ print(f"Warning: Error formatting text: {e}")
435
+ return text
436
+
437
+ @property
438
+ def models(self):
439
+ class _ModelList:
440
+ def list(inner_self):
441
+ return type(self).AVAILABLE_MODELS
442
+ return _ModelList()