webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/uncovr.py
@@ -1,368 +1,368 @@
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-import json
-import uuid
-import re
-from typing import Any, Dict, Optional, Generator, Union
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class UncovrAI(Provider):
-    """
-    A class to interact with the Uncovr AI chat API.
-    """
-
-    AVAILABLE_MODELS = [
-        "default",
-        "gpt-4o-mini",
-        "gemini-2-flash",
-        "gemini-2-flash-lite",
-        "groq-llama-3-1-8b",
-        "o3-mini",
-        "deepseek-r1-distill-qwen-32b",
-        # The following models are not available in the free plan:
-        # "claude-3-7-sonnet",
-        # "gpt-4o",
-        # "claude-3-5-sonnet-v2",
-        # "deepseek-r1-distill-llama-70b",
-        # "gemini-2-flash-lite-preview",
-        # "qwen-qwq-32b"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "default",
-        chat_id: str = None,
-        user_id: str = None,
-        browser: str = "chrome"
-    ):
-        """Initializes the Uncovr AI API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.url = "https://uncovr.app/api/workflows/chat"
-
-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-        # Use fingerprinting to create a consistent browser identity
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
-        self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "Origin": "https://uncovr.app",
-            "Referer": "https://uncovr.app/",
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin"
-        }
-
-        # Initialize curl_cffi Session
-        self.session = Session()
-        # Update curl_cffi session headers and proxies
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.chat_id = chat_id or str(uuid.uuid4())
-        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    @staticmethod
-    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from the UncovrAI stream format '0:"..."'."""
-        if isinstance(chunk, str):
-            match = re.match(r'^0:\s*"?(.*?)"?$', chunk) # Match 0: maybe optional quotes
-            if match:
-                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
-                content = match.group(1).encode().decode('unicode_escape')
-                return content.replace('\\\\', '\\').replace('\\"', '"')
-        return None
-
-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-        })
-
-        # Update session headers
-        for header, value in self.headers.items():
-            self.session.headers[header] = value
-
-        return self.fingerprint
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        temperature: int = 32,
-        creativity: str = "medium",
-        selected_focus: list = ["web"],
-        selected_tools: list = ["quick-cards"]
-    ) -> Union[Dict[str, Any], Generator]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Prepare the request payload
-        payload = {
-            "content": conversation_prompt,
-            "chatId": self.chat_id,
-            "userMessageId": str(uuid.uuid4()),
-            "ai_config": {
-                "selectedFocus": selected_focus,
-                "selectedTools": selected_tools,
-                "agentId": "chat",
-                "modelId": self.model,
-                "temperature": temperature,
-                "creativity": creativity
-            }
-        }
-
-        def for_stream():
-            try:
-                # Use curl_cffi session post with impersonate
-                response = self.session.post(
-                    self.url,
-                    json=payload,
-                    stream=True,
-                    timeout=self.timeout,
-                    impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
-                )
-
-                if response.status_code != 200:
-                    # If we get a non-200 response, try refreshing our identity once
-                    if response.status_code in [403, 429]:
-                        self.refresh_identity()
-                        # Retry with new identity using curl_cffi session
-                        retry_response = self.session.post(
-                            self.url,
-                            json=payload,
-                            stream=True,
-                            timeout=self.timeout,
-                            impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
-                        )
-                        if not retry_response.ok:
-                            raise exceptions.FailedToGenerateResponseError(
-                                f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
-                            )
-                        response = retry_response # Use the successful retry response
-                    else:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code} - {response.text}"
-                        )
-
-                streaming_text = ""
-                # Use sanitize_stream with the custom extractor
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No simple prefix
-                    to_json=False, # Content is not JSON
-                    content_extractor=self._uncovr_extractor, # Use the specific extractor
-                    yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
-                )
-
-                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield dict(text=content_chunk) if not raw else content_chunk
-
-                self.last_response = {"text": streaming_text}
-                self.conversation.update_chat_history(prompt, streaming_text)
-
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
-                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
-        def for_non_stream():
-            try:
-                # Use curl_cffi session post with impersonate
-                response = self.session.post(
-                    self.url,
-                    json=payload,
-                    timeout=self.timeout,
-                    impersonate=self.fingerprint.get("browser_type", "chrome110")
-                )
-
-                if response.status_code != 200:
-                    if response.status_code in [403, 429]:
-                        self.refresh_identity()
-                        # Retry with new identity using curl_cffi session
-                        response = self.session.post(
-                            self.url,
-                            json=payload,
-                            timeout=self.timeout,
-                            impersonate=self.fingerprint.get("browser_type", "chrome110")
-                        )
-                        if not response.ok:
-                            raise exceptions.FailedToGenerateResponseError(
-                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
-                            )
-                    else:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code} - {response.text}"
-                        )
-
-                response_text = response.text # Get the full response text
-
-                # Use sanitize_stream to process the non-streaming text
-                # It won't parse as JSON, but will apply the extractor line by line
-                processed_stream = sanitize_stream(
-                    data=response_text.splitlines(), # Split into lines first
-                    intro_value=None,
-                    to_json=False,
-                    content_extractor=self._uncovr_extractor,
-                    yield_raw_on_error=True
-                )
-
-                # Aggregate the results from the generator
-                full_response = ""
-                for content in processed_stream:
-                    if content and isinstance(content, str):
-                        full_response += content
-
-                # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
-                self.last_response = {"text": full_response}
-                self.conversation.update_chat_history(prompt, full_response)
-                return {"text": full_response}
-
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
-                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
-
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        temperature: int = 32,
-        creativity: str = "medium",
-        selected_focus: list = ["web"],
-        selected_tools: list = []
-    ) -> Union[str, Generator[str, None, None]]:
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally,
-                temperature=temperature, creativity=creativity,
-                selected_focus=selected_focus, selected_tools=selected_tools
-            ):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, False, optimizer=optimizer, conversationally=conversationally,
-                    temperature=temperature, creativity=creativity,
-                    selected_focus=selected_focus, selected_tools=selected_tools
-                )
-            )
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        # Formatting handled by extractor
-        text = response.get("text", "")
-        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement
-
-if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in UncovrAI.AVAILABLE_MODELS:
-        try:
-            test_ai = UncovrAI(model=model, timeout=60)
-            # Test non-stream first as stream logic depends on it
-            response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)
-
-            if response_non_stream and len(response_non_stream.strip()) > 0:
-                # Now test stream
-                response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
-                response_text = ""
-                for chunk in response_stream:
-                    response_text += chunk
-
-                if response_text and len(response_text.strip()) > 0:
-                    status = "✓"
-                    # Clean and truncate response
-                    clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                    display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-                else:
-                    status = "✗ (Stream)"
-                    display_text = "Empty or invalid stream response"
-            else:
-                status = "✗ (Non-Stream)"
-                display_text = "Empty or invalid non-stream response"
-
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import uuid
+import re
+from typing import Any, Dict, Optional, Generator, Union
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class UncovrAI(Provider):
+    """
+    A class to interact with the Uncovr AI chat API.
+    """
+
+    AVAILABLE_MODELS = [
+        "default",
+        "gpt-4o-mini",
+        "gemini-2-flash",
+        "gemini-2-flash-lite",
+        "groq-llama-3-1-8b",
+        "o3-mini",
+        "deepseek-r1-distill-qwen-32b",
+        # The following models are not available in the free plan:
+        # "claude-3-7-sonnet",
+        # "gpt-4o",
+        # "claude-3-5-sonnet-v2",
+        # "deepseek-r1-distill-llama-70b",
+        # "gemini-2-flash-lite-preview",
+        # "qwen-qwq-32b"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "default",
+        chat_id: str = None,
+        user_id: str = None,
+        browser: str = "chrome"
+    ):
+        """Initializes the Uncovr AI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://uncovr.app/api/workflows/chat"
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Origin": "https://uncovr.app",
+            "Referer": "https://uncovr.app/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin"
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.chat_id = chat_id or str(uuid.uuid4())
+        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    @staticmethod
+    def _uncovr_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the UncovrAI stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.match(r'^0:\s*"?(.*?)"?$', chunk) # Match 0: maybe optional quotes
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        temperature: int = 32,
+        creativity: str = "medium",
+        selected_focus: list = ["web"],
+        selected_tools: list = ["quick-cards"]
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Prepare the request payload
+        payload = {
+            "content": conversation_prompt,
+            "chatId": self.chat_id,
+            "userMessageId": str(uuid.uuid4()),
+            "ai_config": {
+                "selectedFocus": selected_focus,
+                "selectedTools": selected_tools,
+                "agentId": "chat",
+                "modelId": self.model,
+                "temperature": temperature,
+                "creativity": creativity
+            }
+        }
+
+        def for_stream():
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
+                )
+
+                if response.status_code != 200:
+                    # If we get a non-200 response, try refreshing our identity once
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        # Retry with new identity using curl_cffi session
+                        retry_response = self.session.post(
+                            self.url,
+                            json=payload,
+                            stream=True,
+                            timeout=self.timeout,
+                            impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
+                        )
+                        if not retry_response.ok:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                            )
+                        response = retry_response # Use the successful retry response
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code} - {response.text}"
+                        )
+
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is not JSON
+                    content_extractor=self._uncovr_extractor, # Use the specific extractor
+                    yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk
+
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+
+        def for_non_stream():
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")
+                )
+
+                if response.status_code != 200:
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        # Retry with new identity using curl_cffi session
+                        response = self.session.post(
+                            self.url,
+                            json=payload,
+                            timeout=self.timeout,
+                            impersonate=self.fingerprint.get("browser_type", "chrome110")
+                        )
+                        if not response.ok:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                            )
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code} - {response.text}"
+                        )
+
+                response_text = response.text # Get the full response text
+
+                # Use sanitize_stream to process the non-streaming text
+                # It won't parse as JSON, but will apply the extractor line by line
+                processed_stream = sanitize_stream(
+                    data=response_text.splitlines(), # Split into lines first
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._uncovr_extractor,
+                    yield_raw_on_error=True
+                )
+
+                # Aggregate the results from the generator
+                full_response = ""
+                for content in processed_stream:
+                    if content and isinstance(content, str):
+                        full_response += content
+
+                # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return {"text": full_response}
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
+
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        temperature: int = 32,
+        creativity: str = "medium",
+        selected_focus: list = ["web"],
+        selected_tools: list = []
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                temperature=temperature, creativity=creativity,
+                selected_focus=selected_focus, selected_tools=selected_tools
+            ):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, False, optimizer=optimizer, conversationally=conversationally,
+                    temperature=temperature, creativity=creativity,
+                    selected_focus=selected_focus, selected_tools=selected_tools
+                )
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        # Formatting handled by extractor
+        text = response.get("text", "")
+        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement
+
+if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in UncovrAI.AVAILABLE_MODELS:
+        try:
+            test_ai = UncovrAI(model=model, timeout=60)
+            # Test non-stream first as stream logic depends on it
+            response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)
+
+            if response_non_stream and len(response_non_stream.strip()) > 0:
+                # Now test stream
+                response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
+                response_text = ""
+                for chunk in response_stream:
+                    response_text += chunk
+
+                if response_text and len(response_text.strip()) > 0:
+                    status = "✓"
+                    # Clean and truncate response
+                    clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                    display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+                else:
+                    status = "✗ (Stream)"
+                    display_text = "Empty or invalid stream response"
+            else:
+                status = "✗ (Non-Stream)"
+                display_text = "Empty or invalid non-stream response"
+
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
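For reference, a minimal usage sketch of the UncovrAI provider whose source appears in the diff above. The import path is inferred from the file path webscout/Provider/uncovr.py in this release's file listing, and the prompts, model, and timeout are taken from the provider's own AVAILABLE_MODELS list and __main__ test block; treat the snippet as illustrative rather than documented API.

# Illustrative usage of the UncovrAI provider from webscout 8.2.9
from webscout.Provider.uncovr import UncovrAI  # import path assumed from the file listing above

ai = UncovrAI(model="gpt-4o-mini", timeout=60)  # model taken from AVAILABLE_MODELS

# Non-streaming: chat() returns the full reply as a string
print(ai.chat("Say 'Hello' in one word"))

# Streaming: chat(..., stream=True) yields text chunks as they arrive
for chunk in ai.chat("Say 'Hi' in one word", stream=True):
    print(chunk, end="", flush=True)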