webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184) hide show
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -1,462 +1,463 @@
1
- import time
2
- import uuid
3
- import re
4
- import json
5
- import cloudscraper
6
- from typing import List, Dict, Optional, Union, Generator, Any
7
-
8
- from webscout.litagent import LitAgent
9
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
- from .utils import (
11
- ChatCompletion,
12
- ChatCompletionChunk,
13
- Choice,
14
- ChatCompletionMessage,
15
- ChoiceDelta,
16
- CompletionUsage,
17
- format_prompt,
18
- get_system_prompt,
19
- get_last_user_message
20
- )
21
-
22
- # ANSI escape codes for formatting
23
- BOLD = "\033[1m"
24
- RED = "\033[91m"
25
- RESET = "\033[0m"
26
-
27
- class Completions(BaseCompletions):
28
- def __init__(self, client: 'UncovrAI'):
29
- self._client = client
30
-
31
- def create(
32
- self,
33
- *,
34
- model: str,
35
- messages: List[Dict[str, str]],
36
- max_tokens: Optional[int] = None,
37
- stream: bool = False,
38
- temperature: Optional[float] = None,
39
- top_p: Optional[float] = None,
40
- **kwargs: Any
41
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
42
- """
43
- Create a chat completion using the UncovrAI API.
44
-
45
- Args:
46
- model: The model to use for completion
47
- messages: A list of messages in the conversation
48
- max_tokens: Maximum number of tokens to generate
49
- stream: Whether to stream the response
50
- temperature: Controls randomness (mapped to UncovrAI's temperature)
51
- top_p: Controls diversity (not directly used by UncovrAI)
52
- **kwargs: Additional parameters
53
-
54
- Returns:
55
- A ChatCompletion object or a generator of ChatCompletionChunk objects
56
- """
57
- # Validate model
58
- if model not in self._client.AVAILABLE_MODELS:
59
- raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
60
-
61
- # Map temperature to UncovrAI's scale (0-100)
62
- # Default to 32 (medium) if not provided
63
- uncovr_temperature = 32
64
- if temperature is not None:
65
- # Map from 0-1 scale to 0-100 scale
66
- uncovr_temperature = int(temperature * 100)
67
- # Ensure it's within bounds
68
- uncovr_temperature = max(0, min(100, uncovr_temperature))
69
-
70
- # Map creativity from kwargs or use default
71
- creativity = kwargs.get("creativity", "medium")
72
-
73
- # Get focus and tools from kwargs or use defaults
74
- selected_focus = kwargs.get("selected_focus", ["web"])
75
- selected_tools = kwargs.get("selected_tools", ["quick-cards"])
76
-
77
- # Generate request ID and timestamp
78
- request_id = str(uuid.uuid4())
79
- created_time = int(time.time())
80
-
81
- # Format the conversation using utility functions
82
- conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
83
-
84
- # Prepare the request payload
85
- payload = {
86
- "content": conversation_prompt,
87
- "chatId": self._client.chat_id,
88
- "userMessageId": str(uuid.uuid4()),
89
- "ai_config": {
90
- "selectedFocus": selected_focus,
91
- "selectedTools": selected_tools,
92
- "agentId": "chat",
93
- "modelId": model,
94
- "temperature": uncovr_temperature,
95
- "creativity": creativity
96
- }
97
- }
98
-
99
- # Handle streaming response
100
- if stream:
101
- return self._handle_streaming_response(
102
- payload=payload,
103
- model=model,
104
- request_id=request_id,
105
- created_time=created_time
106
- )
107
-
108
- # Handle non-streaming response
109
- return self._handle_non_streaming_response(
110
- payload=payload,
111
- model=model,
112
- request_id=request_id,
113
- created_time=created_time
114
- )
115
-
116
- def _handle_streaming_response(
117
- self,
118
- *,
119
- payload: Dict[str, Any],
120
- model: str,
121
- request_id: str,
122
- created_time: int
123
- ) -> Generator[ChatCompletionChunk, None, None]:
124
- """Handle streaming response from UncovrAI API."""
125
- try:
126
- with self._client.session.post(
127
- self._client.url,
128
- json=payload,
129
- stream=True,
130
- timeout=self._client.timeout
131
- ) as response:
132
- if response.status_code != 200:
133
- # If we get a non-200 response, try refreshing our identity once
134
- if response.status_code in [403, 429]:
135
- self._client.refresh_identity()
136
- # Retry with new identity
137
- with self._client.session.post(
138
- self._client.url,
139
- json=payload,
140
- stream=True,
141
- timeout=self._client.timeout
142
- ) as retry_response:
143
- if not retry_response.ok:
144
- raise IOError(
145
- f"Failed to generate response after identity refresh - "
146
- f"({retry_response.status_code}, {retry_response.reason}) - "
147
- f"{retry_response.text}"
148
- )
149
- response = retry_response
150
- else:
151
- raise IOError(f"Request failed with status code {response.status_code}")
152
-
153
- # Process the streaming response
154
- streaming_text = ""
155
- for line in response.iter_lines():
156
- if line:
157
- try:
158
- line = line.decode('utf-8')
159
-
160
- # Use regex to match content messages
161
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
162
- if content_match: # Content message
163
- content = content_match.group(1)
164
- # Format the content to handle escape sequences
165
- content = self._client.format_text(content)
166
- streaming_text += content
167
-
168
- # Create a chunk for this part of the response
169
- delta = ChoiceDelta(content=content)
170
- choice = Choice(
171
- index=0,
172
- delta=delta,
173
- finish_reason=None
174
- )
175
- chunk = ChatCompletionChunk(
176
- id=request_id,
177
- choices=[choice],
178
- created=created_time,
179
- model=model
180
- )
181
-
182
- yield chunk
183
-
184
- # Check for error messages
185
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
186
- if error_match:
187
- error_msg = error_match.group(1)
188
- raise IOError(f"API Error: {error_msg}")
189
-
190
- except (json.JSONDecodeError, UnicodeDecodeError):
191
- continue
192
-
193
- # Yield a final chunk with finish_reason="stop"
194
- delta = ChoiceDelta()
195
- choice = Choice(
196
- index=0,
197
- delta=delta,
198
- finish_reason="stop"
199
- )
200
- chunk = ChatCompletionChunk(
201
- id=request_id,
202
- choices=[choice],
203
- created=created_time,
204
- model=model
205
- )
206
- yield chunk
207
-
208
- except Exception as e:
209
- print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
210
- raise IOError(f"UncovrAI streaming request failed: {e}") from e
211
-
212
- def _handle_non_streaming_response(
213
- self,
214
- *,
215
- payload: Dict[str, Any],
216
- model: str,
217
- request_id: str,
218
- created_time: int
219
- ) -> ChatCompletion:
220
- """Handle non-streaming response from UncovrAI API."""
221
- try:
222
- response = self._client.session.post(
223
- self._client.url,
224
- json=payload,
225
- timeout=self._client.timeout
226
- )
227
-
228
- if response.status_code != 200:
229
- if response.status_code in [403, 429]:
230
- self._client.refresh_identity()
231
- response = self._client.session.post(
232
- self._client.url,
233
- json=payload,
234
- timeout=self._client.timeout
235
- )
236
- if not response.ok:
237
- raise IOError(
238
- f"Failed to generate response after identity refresh - "
239
- f"({response.status_code}, {response.reason}) - "
240
- f"{response.text}"
241
- )
242
- else:
243
- raise IOError(f"Request failed with status code {response.status_code}")
244
-
245
- full_response = ""
246
- for line in response.iter_lines():
247
- if line:
248
- try:
249
- line = line.decode('utf-8')
250
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
251
- if content_match:
252
- content = content_match.group(1)
253
- full_response += content
254
-
255
- # Check for error messages
256
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
257
- if error_match:
258
- error_msg = error_match.group(1)
259
- raise IOError(f"API Error: {error_msg}")
260
-
261
- except (json.JSONDecodeError, UnicodeDecodeError):
262
- continue
263
-
264
- # Format the full response to handle escape sequences
265
- full_response = self._client.format_text(full_response)
266
-
267
- # Create message, choice, and usage objects
268
- message = ChatCompletionMessage(
269
- role="assistant",
270
- content=full_response
271
- )
272
-
273
- choice = Choice(
274
- index=0,
275
- message=message,
276
- finish_reason="stop"
277
- )
278
-
279
- # Estimate token usage (this is approximate)
280
- prompt_tokens = len(payload["content"]) // 4
281
- completion_tokens = len(full_response) // 4
282
- total_tokens = prompt_tokens + completion_tokens
283
-
284
- usage = CompletionUsage(
285
- prompt_tokens=prompt_tokens,
286
- completion_tokens=completion_tokens,
287
- total_tokens=total_tokens
288
- )
289
-
290
- # Create the completion object
291
- completion = ChatCompletion(
292
- id=request_id,
293
- choices=[choice],
294
- created=created_time,
295
- model=model,
296
- usage=usage,
297
- )
298
-
299
- return completion
300
-
301
- except Exception as e:
302
- print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
303
- raise IOError(f"UncovrAI request failed: {e}") from e
304
-
305
- class Chat(BaseChat):
306
- def __init__(self, client: 'UncovrAI'):
307
- self.completions = Completions(client)
308
-
309
- class UncovrAI(OpenAICompatibleProvider):
310
- """
311
- OpenAI-compatible client for Uncovr AI API.
312
-
313
- Usage:
314
- client = UncovrAI()
315
- response = client.chat.completions.create(
316
- model="default",
317
- messages=[{"role": "user", "content": "Hello!"}]
318
- )
319
- print(response.choices[0].message.content)
320
- """
321
-
322
- AVAILABLE_MODELS = [
323
- "default",
324
- "gpt-4o-mini",
325
- "gemini-2-flash",
326
- "gemini-2-flash-lite",
327
- "groq-llama-3-1-8b",
328
- "o3-mini",
329
- "deepseek-r1-distill-qwen-32b",
330
- # The following models are not available in the free plan:
331
- # "claude-3-7-sonnet",
332
- # "gpt-4o",
333
- # "claude-3-5-sonnet-v2",
334
- # "deepseek-r1-distill-llama-70b",
335
- # "gemini-2-flash-lite-preview",
336
- # "qwen-qwq-32b"
337
- ]
338
-
339
- def __init__(
340
- self,
341
- timeout: int = 30,
342
- browser: str = "chrome",
343
- chat_id: Optional[str] = None,
344
- user_id: Optional[str] = None,
345
- proxies: dict = {}
346
- ):
347
- """
348
- Initialize the UncovrAI client.
349
-
350
- Args:
351
- timeout: Request timeout in seconds
352
- browser: Browser name for LitAgent to generate fingerprint
353
- chat_id: Optional chat ID (will generate one if not provided)
354
- user_id: Optional user ID (will generate one if not provided)
355
- proxies: Optional proxy configuration
356
- """
357
- self.url = "https://uncovr.app/api/workflows/chat"
358
- self.timeout = timeout
359
-
360
- # Initialize LitAgent for user agent generation
361
- self.agent = LitAgent()
362
-
363
- # Use fingerprinting to create a consistent browser identity
364
- self.fingerprint = self.agent.generate_fingerprint(browser)
365
-
366
- # Use the fingerprint for headers
367
- self.headers = {
368
- "Accept": self.fingerprint["accept"],
369
- "Accept-Encoding": "gzip, deflate, br, zstd",
370
- "Accept-Language": self.fingerprint["accept_language"],
371
- "Content-Type": "application/json",
372
- "Origin": "https://uncovr.app",
373
- "Referer": "https://uncovr.app/",
374
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
375
- "Sec-CH-UA-Mobile": "?0",
376
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
377
- "User-Agent": self.fingerprint["user_agent"],
378
- "Sec-Fetch-Dest": "empty",
379
- "Sec-Fetch-Mode": "cors",
380
- "Sec-Fetch-Site": "same-origin"
381
- }
382
-
383
- # Use cloudscraper to bypass Cloudflare protection
384
- self.session = cloudscraper.create_scraper()
385
- self.session.headers.update(self.headers)
386
- self.session.proxies.update(proxies)
387
-
388
- # Set chat and user IDs
389
- self.chat_id = chat_id or str(uuid.uuid4())
390
- self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
391
-
392
- # Initialize chat interface
393
- self.chat = Chat(self)
394
-
395
- def refresh_identity(self, browser: str = None):
396
- """
397
- Refreshes the browser identity fingerprint.
398
-
399
- Args:
400
- browser: Specific browser to use for the new fingerprint
401
- """
402
- browser = browser or self.fingerprint.get("browser_type", "chrome")
403
- self.fingerprint = self.agent.generate_fingerprint(browser)
404
-
405
- # Update headers with new fingerprint
406
- self.headers.update({
407
- "Accept": self.fingerprint["accept"],
408
- "Accept-Language": self.fingerprint["accept_language"],
409
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
410
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
411
- "User-Agent": self.fingerprint["user_agent"],
412
- })
413
-
414
- # Update session headers
415
- for header, value in self.headers.items():
416
- self.session.headers[header] = value
417
-
418
- return self.fingerprint
419
-
420
- def format_text(self, text: str) -> str:
421
- """
422
- Format text by replacing escaped newlines with actual newlines.
423
-
424
- Args:
425
- text: Text to format
426
-
427
- Returns:
428
- Formatted text
429
- """
430
- # Use a more comprehensive approach to handle all escape sequences
431
- try:
432
- # First handle double backslashes to avoid issues
433
- text = text.replace('\\\\', '\\')
434
-
435
- # Handle common escape sequences
436
- text = text.replace('\\n', '\n')
437
- text = text.replace('\\r', '\r')
438
- text = text.replace('\\t', '\t')
439
- text = text.replace('\\"', '"')
440
- text = text.replace("\\'", "'")
441
-
442
- # Handle any remaining escape sequences using JSON decoding
443
- try:
444
- # Add quotes to make it a valid JSON string
445
- json_str = f'"{text}"'
446
- # Use json module to decode all escape sequences
447
- decoded = json.loads(json_str)
448
- return decoded
449
- except json.JSONDecodeError:
450
- # If JSON decoding fails, return the text with the replacements we've already done
451
- return text
452
- except Exception as e:
453
- # If any error occurs, return the original text
454
- print(f"{RED}Warning: Error formatting text: {e}{RESET}")
455
- return text
456
-
457
- @property
458
- def models(self):
459
- class _ModelList:
460
- def list(inner_self):
461
- return type(self).AVAILABLE_MODELS
462
- return _ModelList()
1
+ import time
2
+ import uuid
3
+ import re
4
+ import json
5
+ import cloudscraper
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ from webscout.litagent import LitAgent
9
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
+ from .utils import (
11
+ ChatCompletion,
12
+ ChatCompletionChunk,
13
+ Choice,
14
+ ChatCompletionMessage,
15
+ ChoiceDelta,
16
+ CompletionUsage,
17
+ format_prompt,
18
+ get_system_prompt,
19
+ get_last_user_message,
20
+ count_tokens
21
+ )
22
+
23
+ # ANSI escape codes for formatting
24
+ BOLD = "\033[1m"
25
+ RED = "\033[91m"
26
+ RESET = "\033[0m"
27
+
28
+ class Completions(BaseCompletions):
29
+ def __init__(self, client: 'UncovrAI'):
30
+ self._client = client
31
+
32
+ def create(
33
+ self,
34
+ *,
35
+ model: str,
36
+ messages: List[Dict[str, str]],
37
+ max_tokens: Optional[int] = None,
38
+ stream: bool = False,
39
+ temperature: Optional[float] = None,
40
+ top_p: Optional[float] = None,
41
+ **kwargs: Any
42
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
43
+ """
44
+ Create a chat completion using the UncovrAI API.
45
+
46
+ Args:
47
+ model: The model to use for completion
48
+ messages: A list of messages in the conversation
49
+ max_tokens: Maximum number of tokens to generate
50
+ stream: Whether to stream the response
51
+ temperature: Controls randomness (mapped to UncovrAI's temperature)
52
+ top_p: Controls diversity (not directly used by UncovrAI)
53
+ **kwargs: Additional parameters
54
+
55
+ Returns:
56
+ A ChatCompletion object or a generator of ChatCompletionChunk objects
57
+ """
58
+ # Validate model
59
+ if model not in self._client.AVAILABLE_MODELS:
60
+ raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
61
+
62
+ # Map temperature to UncovrAI's scale (0-100)
63
+ # Default to 32 (medium) if not provided
64
+ uncovr_temperature = 32
65
+ if temperature is not None:
66
+ # Map from 0-1 scale to 0-100 scale
67
+ uncovr_temperature = int(temperature * 100)
68
+ # Ensure it's within bounds
69
+ uncovr_temperature = max(0, min(100, uncovr_temperature))
70
+
71
+ # Map creativity from kwargs or use default
72
+ creativity = kwargs.get("creativity", "medium")
73
+
74
+ # Get focus and tools from kwargs or use defaults
75
+ selected_focus = kwargs.get("selected_focus", ["web"])
76
+ selected_tools = kwargs.get("selected_tools", ["quick-cards"])
77
+
78
+ # Generate request ID and timestamp
79
+ request_id = str(uuid.uuid4())
80
+ created_time = int(time.time())
81
+
82
+ # Format the conversation using utility functions
83
+ conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
84
+
85
+ # Prepare the request payload
86
+ payload = {
87
+ "content": conversation_prompt,
88
+ "chatId": self._client.chat_id,
89
+ "userMessageId": str(uuid.uuid4()),
90
+ "ai_config": {
91
+ "selectedFocus": selected_focus,
92
+ "selectedTools": selected_tools,
93
+ "agentId": "chat",
94
+ "modelId": model,
95
+ "temperature": uncovr_temperature,
96
+ "creativity": creativity
97
+ }
98
+ }
99
+
100
+ # Handle streaming response
101
+ if stream:
102
+ return self._handle_streaming_response(
103
+ payload=payload,
104
+ model=model,
105
+ request_id=request_id,
106
+ created_time=created_time
107
+ )
108
+
109
+ # Handle non-streaming response
110
+ return self._handle_non_streaming_response(
111
+ payload=payload,
112
+ model=model,
113
+ request_id=request_id,
114
+ created_time=created_time
115
+ )
116
+
117
+ def _handle_streaming_response(
118
+ self,
119
+ *,
120
+ payload: Dict[str, Any],
121
+ model: str,
122
+ request_id: str,
123
+ created_time: int
124
+ ) -> Generator[ChatCompletionChunk, None, None]:
125
+ """Handle streaming response from UncovrAI API."""
126
+ try:
127
+ with self._client.session.post(
128
+ self._client.url,
129
+ json=payload,
130
+ stream=True,
131
+ timeout=self._client.timeout
132
+ ) as response:
133
+ if response.status_code != 200:
134
+ # If we get a non-200 response, try refreshing our identity once
135
+ if response.status_code in [403, 429]:
136
+ self._client.refresh_identity()
137
+ # Retry with new identity
138
+ with self._client.session.post(
139
+ self._client.url,
140
+ json=payload,
141
+ stream=True,
142
+ timeout=self._client.timeout
143
+ ) as retry_response:
144
+ if not retry_response.ok:
145
+ raise IOError(
146
+ f"Failed to generate response after identity refresh - "
147
+ f"({retry_response.status_code}, {retry_response.reason}) - "
148
+ f"{retry_response.text}"
149
+ )
150
+ response = retry_response
151
+ else:
152
+ raise IOError(f"Request failed with status code {response.status_code}")
153
+
154
+ # Process the streaming response
155
+ streaming_text = ""
156
+ for line in response.iter_lines():
157
+ if line:
158
+ try:
159
+ line = line.decode('utf-8')
160
+
161
+ # Use regex to match content messages
162
+ content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
163
+ if content_match: # Content message
164
+ content = content_match.group(1)
165
+ # Format the content to handle escape sequences
166
+ content = self._client.format_text(content)
167
+ streaming_text += content
168
+
169
+ # Create a chunk for this part of the response
170
+ delta = ChoiceDelta(content=content)
171
+ choice = Choice(
172
+ index=0,
173
+ delta=delta,
174
+ finish_reason=None
175
+ )
176
+ chunk = ChatCompletionChunk(
177
+ id=request_id,
178
+ choices=[choice],
179
+ created=created_time,
180
+ model=model
181
+ )
182
+
183
+ yield chunk
184
+
185
+ # Check for error messages
186
+ error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
187
+ if error_match:
188
+ error_msg = error_match.group(1)
189
+ raise IOError(f"API Error: {error_msg}")
190
+
191
+ except (json.JSONDecodeError, UnicodeDecodeError):
192
+ continue
193
+
194
+ # Yield a final chunk with finish_reason="stop"
195
+ delta = ChoiceDelta()
196
+ choice = Choice(
197
+ index=0,
198
+ delta=delta,
199
+ finish_reason="stop"
200
+ )
201
+ chunk = ChatCompletionChunk(
202
+ id=request_id,
203
+ choices=[choice],
204
+ created=created_time,
205
+ model=model
206
+ )
207
+ yield chunk
208
+
209
+ except Exception as e:
210
+ print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
211
+ raise IOError(f"UncovrAI streaming request failed: {e}") from e
212
+
213
def _handle_non_streaming_response(
    self,
    *,
    payload: Dict[str, Any],
    model: str,
    request_id: str,
    created_time: int
) -> ChatCompletion:
    """Handle non-streaming response from UncovrAI API.

    Posts *payload* once, retries a single time after refreshing the
    browser identity on 403/429, then accumulates the `0:`-prefixed text
    lines of the response body into one assistant message.

    Args:
        payload: JSON body to POST to the UncovrAI chat endpoint.
        model: Model identifier echoed back in the completion object.
        request_id: Identifier used as the completion's ``id``.
        created_time: Unix timestamp used as the completion's ``created``.

    Returns:
        A ``ChatCompletion`` with a single choice and estimated token usage.

    Raises:
        IOError: On non-recoverable HTTP status codes, on an API error
            frame in the response body, or wrapping any other failure.
    """
    try:
        response = self._client.session.post(
            self._client.url,
            json=payload,
            timeout=self._client.timeout
        )

        if response.status_code != 200:
            # 403/429 are assumed to be fingerprint/rate blocks: rotate
            # the browser identity once and retry the same payload.
            if response.status_code in [403, 429]:
                self._client.refresh_identity()
                response = self._client.session.post(
                    self._client.url,
                    json=payload,
                    timeout=self._client.timeout
                )
                if not response.ok:
                    raise IOError(
                        f"Failed to generate response after identity refresh - "
                        f"({response.status_code}, {response.reason}) - "
                        f"{response.text}"
                    )
            else:
                raise IOError(f"Request failed with status code {response.status_code}")

        # The body is a line-oriented stream: lines starting with "0:"
        # carry text chunks, lines starting with "2:" carry data frames
        # (looks like the Vercel AI SDK data-stream protocol — TODO confirm).
        full_response = ""
        for line in response.iter_lines():
            if line:
                try:
                    line = line.decode('utf-8')
                    # Strip the "0:" prefix and optional surrounding quotes.
                    content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
                    if content_match:
                        content = content_match.group(1)
                        full_response += content

                    # Check for error messages
                    error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
                    if error_match:
                        error_msg = error_match.group(1)
                        # Propagates: IOError is not in the except tuple below.
                        raise IOError(f"API Error: {error_msg}")

                # NOTE(review): no json.loads happens in this block, so
                # JSONDecodeError appears unreachable here; only bad UTF-8
                # lines are actually skipped.
                except (json.JSONDecodeError, UnicodeDecodeError):
                    continue

        # Format the full response to handle escape sequences
        # (the chunks arrive with JSON-style \n, \" etc. still escaped).
        full_response = self._client.format_text(full_response)

        # Create message, choice, and usage objects
        message = ChatCompletionMessage(
            role="assistant",
            content=full_response
        )

        choice = Choice(
            index=0,
            message=message,
            finish_reason="stop"
        )

        # Estimate token usage using count_tokens
        # (assumes the prompt text lives under payload["content"] — TODO confirm
        # against the payload builder).
        prompt_tokens = count_tokens(payload.get("content", ""))
        completion_tokens = count_tokens(full_response)
        total_tokens = prompt_tokens + completion_tokens

        usage = CompletionUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=total_tokens
        )

        # Create the completion object
        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage,
        )

        return completion

    except Exception as e:
        # Any failure above (including the IOErrors raised deliberately)
        # is surfaced to the caller wrapped as an IOError with context.
        print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
        raise IOError(f"UncovrAI request failed: {e}") from e
305
+
306
class Chat(BaseChat):
    """Namespace mirroring the OpenAI client layout (``client.chat.completions``)."""

    def __init__(self, client: 'UncovrAI'):
        # Only member: the completions interface bound to the owning client.
        self.completions = Completions(client)
309
+
310
class UncovrAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Uncovr AI API.

    Usage:
        client = UncovrAI()
        response = client.chat.completions.create(
            model="default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Model identifiers accepted by the free plan; paid-only models are
    # kept commented out below for reference.
    AVAILABLE_MODELS = [
        "default",
        "gpt-4o-mini",
        "gemini-2-flash",
        "gemini-2-flash-lite",
        "groq-llama-3-1-8b",
        "o3-mini",
        "deepseek-r1-distill-qwen-32b",
        # The following models are not available in the free plan:
        # "claude-3-7-sonnet",
        # "gpt-4o",
        # "claude-3-5-sonnet-v2",
        # "deepseek-r1-distill-llama-70b",
        # "gemini-2-flash-lite-preview",
        # "qwen-qwq-32b"
    ]
339
+
340
def __init__(
    self,
    timeout: int = 30,
    browser: str = "chrome",
    chat_id: Optional[str] = None,
    user_id: Optional[str] = None,
    proxies: Optional[dict] = None
):
    """
    Initialize the UncovrAI client.

    Args:
        timeout: Request timeout in seconds
        browser: Browser name for LitAgent to generate fingerprint
        chat_id: Optional chat ID (will generate one if not provided)
        user_id: Optional user ID (will generate one if not provided)
        proxies: Optional proxy configuration (defaults to no proxies)
    """
    self.url = "https://uncovr.app/api/workflows/chat"
    self.timeout = timeout

    # Initialize LitAgent for user agent generation
    self.agent = LitAgent()

    # Use fingerprinting to create a consistent browser identity
    self.fingerprint = self.agent.generate_fingerprint(browser)

    # Use the fingerprint for headers
    self.headers = {
        "Accept": self.fingerprint["accept"],
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": self.fingerprint["accept_language"],
        "Content-Type": "application/json",
        "Origin": "https://uncovr.app",
        "Referer": "https://uncovr.app/",
        # Fall back to a static client-hint string when the fingerprint
        # does not provide one.
        "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
        "Sec-CH-UA-Mobile": "?0",
        "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
        "User-Agent": self.fingerprint["user_agent"],
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin"
    }

    # Use cloudscraper to bypass Cloudflare protection
    self.session = cloudscraper.create_scraper()
    self.session.headers.update(self.headers)
    # BUG FIX: the previous signature used `proxies: dict = {}` — a mutable
    # default shared across every instance, so one caller mutating it would
    # leak into all clients. Use the None sentinel instead.
    self.session.proxies.update(proxies or {})

    # Set chat and user IDs
    self.chat_id = chat_id or str(uuid.uuid4())
    self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

    # Initialize chat interface
    self.chat = Chat(self)
395
+
396
def refresh_identity(self, browser: str = None):
    """
    Regenerate the browser fingerprint and push it into the request headers.

    Args:
        browser: Specific browser to use for the new fingerprint; defaults
            to the browser type recorded in the current fingerprint.

    Returns:
        The freshly generated fingerprint dict.
    """
    target_browser = browser or self.fingerprint.get("browser_type", "chrome")
    fresh = self.agent.generate_fingerprint(target_browser)
    self.fingerprint = fresh

    # Fold the fingerprint-derived values into the header template;
    # keep the previous Sec-CH-UA when the new fingerprint lacks one.
    self.headers.update({
        "Accept": fresh["accept"],
        "Accept-Language": fresh["accept_language"],
        "Sec-CH-UA": fresh["sec_ch_ua"] or self.headers["Sec-CH-UA"],
        "Sec-CH-UA-Platform": f'"{fresh["platform"]}"',
        "User-Agent": fresh["user_agent"],
    })

    # Mirror every header onto the live session object.
    for name in self.headers:
        self.session.headers[name] = self.headers[name]

    return self.fingerprint
420
+
421
def format_text(self, text: str) -> str:
    """
    Format text by replacing escaped newlines with actual newlines.

    Args:
        text: Text to format

    Returns:
        Formatted text
    """
    try:
        # Manual un-escaping, applied in a fixed order: collapse doubled
        # backslashes first so the later single-escape passes see clean input.
        replacement_order = (
            ('\\\\', '\\'),
            ('\\n', '\n'),
            ('\\r', '\r'),
            ('\\t', '\t'),
            ('\\"', '"'),
            ("\\'", "'"),
        )
        for escaped, literal in replacement_order:
            text = text.replace(escaped, literal)

        # Let the JSON decoder resolve any remaining escape sequences by
        # treating the text as a quoted JSON string.
        try:
            return json.loads(f'"{text}"')
        except json.JSONDecodeError:
            # Not valid as a JSON string (e.g. contains bare quotes);
            # keep the manually-replaced text instead.
            return text
    except Exception as e:
        # Best-effort formatter: never fail the request over cosmetics.
        print(f"{RED}Warning: Error formatting text: {e}{RESET}")
        return text
457
+
458
@property
def models(self):
    """Expose an OpenAI-style ``client.models.list()`` accessor."""
    owner = self

    class _ModelList:
        # Lookup happens at call time so subclass overrides of
        # AVAILABLE_MODELS are respected.
        def list(inner_self):
            return type(owner).AVAILABLE_MODELS

    return _ModelList()