webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the package registry's advisory page for more details.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,462 +1,477 @@
1
- import time
2
- import uuid
3
- import re
4
- import json
5
- import cloudscraper
6
- from typing import List, Dict, Optional, Union, Generator, Any
7
-
8
- from webscout.litagent import LitAgent
9
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
- from .utils import (
11
- ChatCompletion,
12
- ChatCompletionChunk,
13
- Choice,
14
- ChatCompletionMessage,
15
- ChoiceDelta,
16
- CompletionUsage,
17
- format_prompt,
18
- get_system_prompt,
19
- get_last_user_message
20
- )
21
-
22
- # ANSI escape codes for formatting
23
- BOLD = "\033[1m"
24
- RED = "\033[91m"
25
- RESET = "\033[0m"
26
-
27
- class Completions(BaseCompletions):
28
- def __init__(self, client: 'UncovrAI'):
29
- self._client = client
30
-
31
- def create(
32
- self,
33
- *,
34
- model: str,
35
- messages: List[Dict[str, str]],
36
- max_tokens: Optional[int] = None,
37
- stream: bool = False,
38
- temperature: Optional[float] = None,
39
- top_p: Optional[float] = None,
40
- **kwargs: Any
41
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
42
- """
43
- Create a chat completion using the UncovrAI API.
44
-
45
- Args:
46
- model: The model to use for completion
47
- messages: A list of messages in the conversation
48
- max_tokens: Maximum number of tokens to generate
49
- stream: Whether to stream the response
50
- temperature: Controls randomness (mapped to UncovrAI's temperature)
51
- top_p: Controls diversity (not directly used by UncovrAI)
52
- **kwargs: Additional parameters
53
-
54
- Returns:
55
- A ChatCompletion object or a generator of ChatCompletionChunk objects
56
- """
57
- # Validate model
58
- if model not in self._client.AVAILABLE_MODELS:
59
- raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
60
-
61
- # Map temperature to UncovrAI's scale (0-100)
62
- # Default to 32 (medium) if not provided
63
- uncovr_temperature = 32
64
- if temperature is not None:
65
- # Map from 0-1 scale to 0-100 scale
66
- uncovr_temperature = int(temperature * 100)
67
- # Ensure it's within bounds
68
- uncovr_temperature = max(0, min(100, uncovr_temperature))
69
-
70
- # Map creativity from kwargs or use default
71
- creativity = kwargs.get("creativity", "medium")
72
-
73
- # Get focus and tools from kwargs or use defaults
74
- selected_focus = kwargs.get("selected_focus", ["web"])
75
- selected_tools = kwargs.get("selected_tools", ["quick-cards"])
76
-
77
- # Generate request ID and timestamp
78
- request_id = str(uuid.uuid4())
79
- created_time = int(time.time())
80
-
81
- # Format the conversation using utility functions
82
- conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
83
-
84
- # Prepare the request payload
85
- payload = {
86
- "content": conversation_prompt,
87
- "chatId": self._client.chat_id,
88
- "userMessageId": str(uuid.uuid4()),
89
- "ai_config": {
90
- "selectedFocus": selected_focus,
91
- "selectedTools": selected_tools,
92
- "agentId": "chat",
93
- "modelId": model,
94
- "temperature": uncovr_temperature,
95
- "creativity": creativity
96
- }
97
- }
98
-
99
- # Handle streaming response
100
- if stream:
101
- return self._handle_streaming_response(
102
- payload=payload,
103
- model=model,
104
- request_id=request_id,
105
- created_time=created_time
106
- )
107
-
108
- # Handle non-streaming response
109
- return self._handle_non_streaming_response(
110
- payload=payload,
111
- model=model,
112
- request_id=request_id,
113
- created_time=created_time
114
- )
115
-
116
- def _handle_streaming_response(
117
- self,
118
- *,
119
- payload: Dict[str, Any],
120
- model: str,
121
- request_id: str,
122
- created_time: int
123
- ) -> Generator[ChatCompletionChunk, None, None]:
124
- """Handle streaming response from UncovrAI API."""
125
- try:
126
- with self._client.session.post(
127
- self._client.url,
128
- json=payload,
129
- stream=True,
130
- timeout=self._client.timeout
131
- ) as response:
132
- if response.status_code != 200:
133
- # If we get a non-200 response, try refreshing our identity once
134
- if response.status_code in [403, 429]:
135
- self._client.refresh_identity()
136
- # Retry with new identity
137
- with self._client.session.post(
138
- self._client.url,
139
- json=payload,
140
- stream=True,
141
- timeout=self._client.timeout
142
- ) as retry_response:
143
- if not retry_response.ok:
144
- raise IOError(
145
- f"Failed to generate response after identity refresh - "
146
- f"({retry_response.status_code}, {retry_response.reason}) - "
147
- f"{retry_response.text}"
148
- )
149
- response = retry_response
150
- else:
151
- raise IOError(f"Request failed with status code {response.status_code}")
152
-
153
- # Process the streaming response
154
- streaming_text = ""
155
- for line in response.iter_lines():
156
- if line:
157
- try:
158
- line = line.decode('utf-8')
159
-
160
- # Use regex to match content messages
161
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
162
- if content_match: # Content message
163
- content = content_match.group(1)
164
- # Format the content to handle escape sequences
165
- content = self._client.format_text(content)
166
- streaming_text += content
167
-
168
- # Create a chunk for this part of the response
169
- delta = ChoiceDelta(content=content)
170
- choice = Choice(
171
- index=0,
172
- delta=delta,
173
- finish_reason=None
174
- )
175
- chunk = ChatCompletionChunk(
176
- id=request_id,
177
- choices=[choice],
178
- created=created_time,
179
- model=model
180
- )
181
-
182
- yield chunk
183
-
184
- # Check for error messages
185
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
186
- if error_match:
187
- error_msg = error_match.group(1)
188
- raise IOError(f"API Error: {error_msg}")
189
-
190
- except (json.JSONDecodeError, UnicodeDecodeError):
191
- continue
192
-
193
- # Yield a final chunk with finish_reason="stop"
194
- delta = ChoiceDelta()
195
- choice = Choice(
196
- index=0,
197
- delta=delta,
198
- finish_reason="stop"
199
- )
200
- chunk = ChatCompletionChunk(
201
- id=request_id,
202
- choices=[choice],
203
- created=created_time,
204
- model=model
205
- )
206
- yield chunk
207
-
208
- except Exception as e:
209
- print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
210
- raise IOError(f"UncovrAI streaming request failed: {e}") from e
211
-
212
- def _handle_non_streaming_response(
213
- self,
214
- *,
215
- payload: Dict[str, Any],
216
- model: str,
217
- request_id: str,
218
- created_time: int
219
- ) -> ChatCompletion:
220
- """Handle non-streaming response from UncovrAI API."""
221
- try:
222
- response = self._client.session.post(
223
- self._client.url,
224
- json=payload,
225
- timeout=self._client.timeout
226
- )
227
-
228
- if response.status_code != 200:
229
- if response.status_code in [403, 429]:
230
- self._client.refresh_identity()
231
- response = self._client.session.post(
232
- self._client.url,
233
- json=payload,
234
- timeout=self._client.timeout
235
- )
236
- if not response.ok:
237
- raise IOError(
238
- f"Failed to generate response after identity refresh - "
239
- f"({response.status_code}, {response.reason}) - "
240
- f"{response.text}"
241
- )
242
- else:
243
- raise IOError(f"Request failed with status code {response.status_code}")
244
-
245
- full_response = ""
246
- for line in response.iter_lines():
247
- if line:
248
- try:
249
- line = line.decode('utf-8')
250
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
251
- if content_match:
252
- content = content_match.group(1)
253
- full_response += content
254
-
255
- # Check for error messages
256
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
257
- if error_match:
258
- error_msg = error_match.group(1)
259
- raise IOError(f"API Error: {error_msg}")
260
-
261
- except (json.JSONDecodeError, UnicodeDecodeError):
262
- continue
263
-
264
- # Format the full response to handle escape sequences
265
- full_response = self._client.format_text(full_response)
266
-
267
- # Create message, choice, and usage objects
268
- message = ChatCompletionMessage(
269
- role="assistant",
270
- content=full_response
271
- )
272
-
273
- choice = Choice(
274
- index=0,
275
- message=message,
276
- finish_reason="stop"
277
- )
278
-
279
- # Estimate token usage (this is approximate)
280
- prompt_tokens = len(payload["content"]) // 4
281
- completion_tokens = len(full_response) // 4
282
- total_tokens = prompt_tokens + completion_tokens
283
-
284
- usage = CompletionUsage(
285
- prompt_tokens=prompt_tokens,
286
- completion_tokens=completion_tokens,
287
- total_tokens=total_tokens
288
- )
289
-
290
- # Create the completion object
291
- completion = ChatCompletion(
292
- id=request_id,
293
- choices=[choice],
294
- created=created_time,
295
- model=model,
296
- usage=usage,
297
- )
298
-
299
- return completion
300
-
301
- except Exception as e:
302
- print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
303
- raise IOError(f"UncovrAI request failed: {e}") from e
304
-
305
- class Chat(BaseChat):
306
- def __init__(self, client: 'UncovrAI'):
307
- self.completions = Completions(client)
308
-
309
- class UncovrAI(OpenAICompatibleProvider):
310
- """
311
- OpenAI-compatible client for Uncovr AI API.
312
-
313
- Usage:
314
- client = UncovrAI()
315
- response = client.chat.completions.create(
316
- model="default",
317
- messages=[{"role": "user", "content": "Hello!"}]
318
- )
319
- print(response.choices[0].message.content)
320
- """
321
-
322
- AVAILABLE_MODELS = [
323
- "default",
324
- "gpt-4o-mini",
325
- "gemini-2-flash",
326
- "gemini-2-flash-lite",
327
- "groq-llama-3-1-8b",
328
- "o3-mini",
329
- "deepseek-r1-distill-qwen-32b",
330
- # The following models are not available in the free plan:
331
- # "claude-3-7-sonnet",
332
- # "gpt-4o",
333
- # "claude-3-5-sonnet-v2",
334
- # "deepseek-r1-distill-llama-70b",
335
- # "gemini-2-flash-lite-preview",
336
- # "qwen-qwq-32b"
337
- ]
338
-
339
- def __init__(
340
- self,
341
- timeout: int = 30,
342
- browser: str = "chrome",
343
- chat_id: Optional[str] = None,
344
- user_id: Optional[str] = None,
345
- proxies: dict = {}
346
- ):
347
- """
348
- Initialize the UncovrAI client.
349
-
350
- Args:
351
- timeout: Request timeout in seconds
352
- browser: Browser name for LitAgent to generate fingerprint
353
- chat_id: Optional chat ID (will generate one if not provided)
354
- user_id: Optional user ID (will generate one if not provided)
355
- proxies: Optional proxy configuration
356
- """
357
- self.url = "https://uncovr.app/api/workflows/chat"
358
- self.timeout = timeout
359
-
360
- # Initialize LitAgent for user agent generation
361
- self.agent = LitAgent()
362
-
363
- # Use fingerprinting to create a consistent browser identity
364
- self.fingerprint = self.agent.generate_fingerprint(browser)
365
-
366
- # Use the fingerprint for headers
367
- self.headers = {
368
- "Accept": self.fingerprint["accept"],
369
- "Accept-Encoding": "gzip, deflate, br, zstd",
370
- "Accept-Language": self.fingerprint["accept_language"],
371
- "Content-Type": "application/json",
372
- "Origin": "https://uncovr.app",
373
- "Referer": "https://uncovr.app/",
374
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
375
- "Sec-CH-UA-Mobile": "?0",
376
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
377
- "User-Agent": self.fingerprint["user_agent"],
378
- "Sec-Fetch-Dest": "empty",
379
- "Sec-Fetch-Mode": "cors",
380
- "Sec-Fetch-Site": "same-origin"
381
- }
382
-
383
- # Use cloudscraper to bypass Cloudflare protection
384
- self.session = cloudscraper.create_scraper()
385
- self.session.headers.update(self.headers)
386
- self.session.proxies.update(proxies)
387
-
388
- # Set chat and user IDs
389
- self.chat_id = chat_id or str(uuid.uuid4())
390
- self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
391
-
392
- # Initialize chat interface
393
- self.chat = Chat(self)
394
-
395
- def refresh_identity(self, browser: str = None):
396
- """
397
- Refreshes the browser identity fingerprint.
398
-
399
- Args:
400
- browser: Specific browser to use for the new fingerprint
401
- """
402
- browser = browser or self.fingerprint.get("browser_type", "chrome")
403
- self.fingerprint = self.agent.generate_fingerprint(browser)
404
-
405
- # Update headers with new fingerprint
406
- self.headers.update({
407
- "Accept": self.fingerprint["accept"],
408
- "Accept-Language": self.fingerprint["accept_language"],
409
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
410
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
411
- "User-Agent": self.fingerprint["user_agent"],
412
- })
413
-
414
- # Update session headers
415
- for header, value in self.headers.items():
416
- self.session.headers[header] = value
417
-
418
- return self.fingerprint
419
-
420
- def format_text(self, text: str) -> str:
421
- """
422
- Format text by replacing escaped newlines with actual newlines.
423
-
424
- Args:
425
- text: Text to format
426
-
427
- Returns:
428
- Formatted text
429
- """
430
- # Use a more comprehensive approach to handle all escape sequences
431
- try:
432
- # First handle double backslashes to avoid issues
433
- text = text.replace('\\\\', '\\')
434
-
435
- # Handle common escape sequences
436
- text = text.replace('\\n', '\n')
437
- text = text.replace('\\r', '\r')
438
- text = text.replace('\\t', '\t')
439
- text = text.replace('\\"', '"')
440
- text = text.replace("\\'", "'")
441
-
442
- # Handle any remaining escape sequences using JSON decoding
443
- try:
444
- # Add quotes to make it a valid JSON string
445
- json_str = f'"{text}"'
446
- # Use json module to decode all escape sequences
447
- decoded = json.loads(json_str)
448
- return decoded
449
- except json.JSONDecodeError:
450
- # If JSON decoding fails, return the text with the replacements we've already done
451
- return text
452
- except Exception as e:
453
- # If any error occurs, return the original text
454
- print(f"{RED}Warning: Error formatting text: {e}{RESET}")
455
- return text
456
-
457
- @property
458
- def models(self):
459
- class _ModelList:
460
- def list(inner_self):
461
- return type(self).AVAILABLE_MODELS
462
- return _ModelList()
1
+ import time
2
+ import uuid
3
+ import re
4
+ import json
5
+ import cloudscraper
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ from webscout.litagent import LitAgent
9
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
+ from .utils import (
11
+ ChatCompletion,
12
+ ChatCompletionChunk,
13
+ Choice,
14
+ ChatCompletionMessage,
15
+ ChoiceDelta,
16
+ CompletionUsage,
17
+ format_prompt,
18
+ get_system_prompt,
19
+ get_last_user_message,
20
+ count_tokens
21
+ )
22
+
23
+ # ANSI escape codes for formatting
24
+ BOLD = "\033[1m"
25
+ RED = "\033[91m"
26
+ RESET = "\033[0m"
27
+
28
+ class Completions(BaseCompletions):
29
+ def __init__(self, client: 'UncovrAI'):
30
+ self._client = client
31
+
32
+ def create(
33
+ self,
34
+ *,
35
+ model: str,
36
+ messages: List[Dict[str, str]],
37
+ max_tokens: Optional[int] = None,
38
+ stream: bool = False,
39
+ temperature: Optional[float] = None,
40
+ top_p: Optional[float] = None,
41
+ timeout: Optional[int] = None,
42
+ proxies: Optional[Dict[str, str]] = None,
43
+ **kwargs: Any
44
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
45
+ """
46
+ Create a chat completion using the UncovrAI API.
47
+
48
+ Args:
49
+ model: The model to use for completion
50
+ messages: A list of messages in the conversation
51
+ max_tokens: Maximum number of tokens to generate
52
+ stream: Whether to stream the response
53
+ temperature: Controls randomness (mapped to UncovrAI's temperature)
54
+ top_p: Controls diversity (not directly used by UncovrAI)
55
+ **kwargs: Additional parameters
56
+
57
+ Returns:
58
+ A ChatCompletion object or a generator of ChatCompletionChunk objects
59
+ """
60
+ # Validate model
61
+ if model not in self._client.AVAILABLE_MODELS:
62
+ raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
63
+
64
+ # Map temperature to UncovrAI's scale (0-100)
65
+ # Default to 32 (medium) if not provided
66
+ uncovr_temperature = 32
67
+ if temperature is not None:
68
+ # Map from 0-1 scale to 0-100 scale
69
+ uncovr_temperature = int(temperature * 100)
70
+ # Ensure it's within bounds
71
+ uncovr_temperature = max(0, min(100, uncovr_temperature))
72
+
73
+ # Map creativity from kwargs or use default
74
+ creativity = kwargs.get("creativity", "medium")
75
+
76
+ # Get focus and tools from kwargs or use defaults
77
+ selected_focus = kwargs.get("selected_focus", ["web"])
78
+ selected_tools = kwargs.get("selected_tools", ["quick-cards"])
79
+
80
+ # Generate request ID and timestamp
81
+ request_id = str(uuid.uuid4())
82
+ created_time = int(time.time())
83
+
84
+ # Format the conversation using utility functions
85
+ conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
86
+
87
+ # Prepare the request payload
88
+ payload = {
89
+ "content": conversation_prompt,
90
+ "chatId": self._client.chat_id,
91
+ "userMessageId": str(uuid.uuid4()),
92
+ "ai_config": {
93
+ "selectedFocus": selected_focus,
94
+ "selectedTools": selected_tools,
95
+ "agentId": "chat",
96
+ "modelId": model,
97
+ "temperature": uncovr_temperature,
98
+ "creativity": creativity
99
+ }
100
+ }
101
+
102
+ # Handle streaming response
103
+ if stream:
104
+ return self._handle_streaming_response(
105
+ payload=payload,
106
+ model=model,
107
+ request_id=request_id,
108
+ created_time=created_time,
109
+ timeout=timeout,
110
+ proxies=proxies
111
+ )
112
+
113
+ # Handle non-streaming response
114
+ return self._handle_non_streaming_response(
115
+ payload=payload,
116
+ model=model,
117
+ request_id=request_id,
118
+ created_time=created_time,
119
+ timeout=timeout,
120
+ proxies=proxies
121
+ )
122
+
123
+ def _handle_streaming_response(
124
+ self,
125
+ *,
126
+ payload: Dict[str, Any],
127
+ model: str,
128
+ request_id: str,
129
+ created_time: int,
130
+ timeout: Optional[int] = None,
131
+ proxies: Optional[Dict[str, str]] = None
132
+ ) -> Generator[ChatCompletionChunk, None, None]:
133
+ """Handle streaming response from UncovrAI API."""
134
+ try:
135
+ with self._client.session.post(
136
+ self._client.url,
137
+ json=payload,
138
+ stream=True,
139
+ timeout=timeout or self._client.timeout,
140
+ proxies=proxies or getattr(self._client, "proxies", None)
141
+ ) as response:
142
+ if response.status_code != 200:
143
+ # If we get a non-200 response, try refreshing our identity once
144
+ if response.status_code in [403, 429]:
145
+ self._client.refresh_identity()
146
+ # Retry with new identity
147
+ with self._client.session.post(
148
+ self._client.url,
149
+ json=payload,
150
+ stream=True,
151
+ timeout=timeout or self._client.timeout,
152
+ proxies=proxies or getattr(self._client, "proxies", None)
153
+ ) as retry_response:
154
+ if not retry_response.ok:
155
+ raise IOError(
156
+ f"Failed to generate response after identity refresh - "
157
+ f"({retry_response.status_code}, {retry_response.reason}) - "
158
+ f"{retry_response.text}"
159
+ )
160
+ response = retry_response
161
+ else:
162
+ raise IOError(f"Request failed with status code {response.status_code}")
163
+
164
+ # Process the streaming response
165
+ streaming_text = ""
166
+ for line in response.iter_lines():
167
+ if line:
168
+ try:
169
+ line = line.decode('utf-8')
170
+
171
+ # Use regex to match content messages
172
+ content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
173
+ if content_match: # Content message
174
+ content = content_match.group(1)
175
+ # Format the content to handle escape sequences
176
+ content = self._client.format_text(content)
177
+ streaming_text += content
178
+
179
+ # Create a chunk for this part of the response
180
+ delta = ChoiceDelta(content=content)
181
+ choice = Choice(
182
+ index=0,
183
+ delta=delta,
184
+ finish_reason=None
185
+ )
186
+ chunk = ChatCompletionChunk(
187
+ id=request_id,
188
+ choices=[choice],
189
+ created=created_time,
190
+ model=model
191
+ )
192
+
193
+ yield chunk
194
+
195
+ # Check for error messages
196
+ error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
197
+ if error_match:
198
+ error_msg = error_match.group(1)
199
+ raise IOError(f"API Error: {error_msg}")
200
+
201
+ except (json.JSONDecodeError, UnicodeDecodeError):
202
+ continue
203
+
204
+ # Yield a final chunk with finish_reason="stop"
205
+ delta = ChoiceDelta()
206
+ choice = Choice(
207
+ index=0,
208
+ delta=delta,
209
+ finish_reason="stop"
210
+ )
211
+ chunk = ChatCompletionChunk(
212
+ id=request_id,
213
+ choices=[choice],
214
+ created=created_time,
215
+ model=model
216
+ )
217
+ yield chunk
218
+
219
+ except Exception as e:
220
+ print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
221
+ raise IOError(f"UncovrAI streaming request failed: {e}") from e
222
+
223
def _handle_non_streaming_response(
    self,
    *,
    payload: Dict[str, Any],
    model: str,
    request_id: str,
    created_time: int,
    timeout: Optional[int] = None,
    proxies: Optional[Dict[str, str]] = None
) -> ChatCompletion:
    """
    Handle a non-streaming response from the UncovrAI API.

    Posts *payload*, retries once after an identity refresh on 403/429,
    then collects every `0:`-prefixed content line of the vercel-ai style
    stream into a single ChatCompletion.

    Args:
        payload: JSON body to POST to the chat endpoint.
        model: Model name recorded on the returned completion.
        request_id: Identifier recorded on the returned completion.
        created_time: Unix timestamp recorded on the returned completion.
        timeout: Optional per-request timeout; falls back to the client's.
        proxies: Optional proxy mapping; falls back to the client's.

    Returns:
        A ChatCompletion containing the assembled assistant message and
        estimated token usage.

    Raises:
        IOError: On non-200 responses, API-reported errors, or any other
            failure (wrapped with the original exception as cause).
    """
    try:
        post = self._client.session.post
        response = post(
            self._client.url,
            json=payload,
            timeout=timeout or self._client.timeout,
            proxies=proxies or getattr(self._client, "proxies", None),
        )

        if response.status_code != 200:
            # Only auth/rate-limit failures are worth retrying.
            if response.status_code not in (403, 429):
                raise IOError(f"Request failed with status code {response.status_code}")
            self._client.refresh_identity()
            response = post(
                self._client.url,
                json=payload,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None),
            )
            if not response.ok:
                raise IOError(
                    f"Failed to generate response after identity refresh - "
                    f"({response.status_code}, {response.reason}) - "
                    f"{response.text}"
                )

        # Collect the `0:` content fragments; surface any `2:` error line.
        pieces = []
        for raw_line in response.iter_lines():
            if not raw_line:
                continue
            try:
                decoded = raw_line.decode('utf-8')
                content_match = re.match(r'^0:\s*"?(.*?)"?$', decoded)
                if content_match:
                    pieces.append(content_match.group(1))

                error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', decoded)
                if error_match:
                    raise IOError(f"API Error: {error_match.group(1)}")
            except (json.JSONDecodeError, UnicodeDecodeError):
                # Skip undecodable lines rather than aborting the response.
                continue

        # Resolve escape sequences in the assembled text.
        full_response = self._client.format_text("".join(pieces))

        message = ChatCompletionMessage(
            role="assistant",
            content=full_response
        )
        choice = Choice(
            index=0,
            message=message,
            finish_reason="stop"
        )

        # Token usage is an estimate based on count_tokens.
        prompt_tokens = count_tokens(payload.get("content", ""))
        completion_tokens = count_tokens(full_response)
        usage = CompletionUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens
        )

        return ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage,
        )

    except Exception as e:
        print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
        raise IOError(f"UncovrAI request failed: {e}") from e
319
+
320
class Chat(BaseChat):
    """Chat namespace exposing the OpenAI-style ``completions`` interface."""

    def __init__(self, client: 'UncovrAI'):
        # Bind the completions endpoint to the owning UncovrAI client.
        self.completions = Completions(client)
323
+
324
class UncovrAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the Uncovr AI API.

    Example:
        client = UncovrAI()
        response = client.chat.completions.create(
            model="default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Models selectable on the free plan.
    AVAILABLE_MODELS = [
        "default",
        "gpt-4o-mini",
        "gemini-2-flash",
        "gemini-2-flash-lite",
        "groq-llama-3-1-8b",
        "o3-mini",
        "deepseek-r1-distill-qwen-32b",
        # The following models are not available in the free plan:
        # "claude-3-7-sonnet",
        # "gpt-4o",
        # "claude-3-5-sonnet-v2",
        # "deepseek-r1-distill-llama-70b",
        # "gemini-2-flash-lite-preview",
        # "qwen-qwq-32b"
    ]
353
+
354
def __init__(
    self,
    timeout: int = 30,
    browser: str = "chrome",
    chat_id: Optional[str] = None,
    user_id: Optional[str] = None,
    proxies: Optional[dict] = None
):
    """
    Initialize the UncovrAI client.

    Args:
        timeout: Request timeout in seconds.
        browser: Browser name for LitAgent to generate a fingerprint.
        chat_id: Optional chat ID (a UUID is generated if not provided).
        user_id: Optional user ID (generated if not provided).
        proxies: Optional proxy configuration. Defaults to no proxies.
            (Previously a mutable ``{}`` default, which is shared across
            calls in Python; ``None`` avoids that pitfall.)
    """
    self.url = "https://uncovr.app/api/workflows/chat"
    self.timeout = timeout

    # Initialize LitAgent for user agent generation.
    self.agent = LitAgent()

    # Use fingerprinting to create a consistent browser identity.
    self.fingerprint = self.agent.generate_fingerprint(browser)

    # Build request headers from the fingerprint; the Sec-CH-UA literal is a
    # fallback for fingerprints that lack one.
    self.headers = {
        "Accept": self.fingerprint["accept"],
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": self.fingerprint["accept_language"],
        "Content-Type": "application/json",
        "Origin": "https://uncovr.app",
        "Referer": "https://uncovr.app/",
        "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
        "Sec-CH-UA-Mobile": "?0",
        "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
        "User-Agent": self.fingerprint["user_agent"],
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin"
    }

    # Use cloudscraper to bypass Cloudflare protection.
    self.session = cloudscraper.create_scraper()
    self.session.headers.update(self.headers)
    self.session.proxies.update(proxies or {})

    # Set chat and user IDs.
    self.chat_id = chat_id or str(uuid.uuid4())
    self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

    # Initialize the OpenAI-style chat interface.
    self.chat = Chat(self)
409
+
410
def refresh_identity(self, browser: Optional[str] = None):
    """
    Regenerate the browser fingerprint and apply it to the session headers.

    Args:
        browser: Specific browser to base the new fingerprint on; defaults
            to the current fingerprint's browser type (or "chrome").

    Returns:
        The newly generated fingerprint dict.
    """
    browser = browser or self.fingerprint.get("browser_type", "chrome")
    self.fingerprint = self.agent.generate_fingerprint(browser)

    # Refresh only the fingerprint-derived headers; keep the previous
    # Sec-CH-UA if the new fingerprint does not supply one.
    self.headers.update({
        "Accept": self.fingerprint["accept"],
        "Accept-Language": self.fingerprint["accept_language"],
        "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
        "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
        "User-Agent": self.fingerprint["user_agent"],
    })

    # Push the refreshed headers onto the underlying session in one call
    # (replaces the previous per-key copy loop; same effect).
    self.session.headers.update(self.headers)

    return self.fingerprint
434
+
435
def format_text(self, text: str) -> str:
    """
    Resolve backslash escape sequences in *text* into literal characters.

    Handles doubled backslashes first, then the common escapes
    (newline, carriage return, tab, quotes), and finally lets the JSON
    decoder resolve anything that remains (e.g. ``\\uXXXX``).

    Args:
        text: Text possibly containing escape sequences.

    Returns:
        The formatted text; on failure the best-effort (or original)
        text is returned instead of raising.
    """
    try:
        # Collapse doubled backslashes first so the simple replacements
        # below cannot misinterpret them.
        result = text.replace('\\\\', '\\')

        # Common escape sequences, applied in a fixed order.
        for escaped, literal in (
            ('\\n', '\n'),
            ('\\r', '\r'),
            ('\\t', '\t'),
            ('\\"', '"'),
            ("\\'", "'"),
        ):
            result = result.replace(escaped, literal)

        # Delegate any remaining escapes to the JSON decoder by treating
        # the text as a JSON string literal.
        try:
            return json.loads(f'"{result}"')
        except json.JSONDecodeError:
            # No longer a valid JSON string; keep the manual result.
            return result
    except Exception as e:
        # Never propagate formatting errors to the caller.
        print(f"{RED}Warning: Error formatting text: {e}{RESET}")
        return text
471
+
472
@property
def models(self):
    """Return an object whose ``list()`` yields the available model names."""
    owner = self

    class _ModelList:
        def list(inner_self):
            # Read the class-level catalog of the owning client.
            return type(owner).AVAILABLE_MODELS

    return _ModelList()