webscout 7.7-py3-none-any.whl → 7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (134)
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +12 -29
  3. webscout/DWEBS.py +477 -461
  4. webscout/Extra/__init__.py +2 -0
  5. webscout/Extra/autocoder/__init__.py +9 -9
  6. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -790
  7. webscout/Extra/autocoder/autocoder_utiles.py +332 -194
  8. webscout/Extra/gguf.py +682 -682
  9. webscout/Extra/tempmail/__init__.py +26 -0
  10. webscout/Extra/tempmail/async_utils.py +141 -0
  11. webscout/Extra/tempmail/base.py +156 -0
  12. webscout/Extra/tempmail/cli.py +187 -0
  13. webscout/Extra/tempmail/mail_tm.py +361 -0
  14. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  15. webscout/Provider/AI21.py +1 -1
  16. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  17. webscout/Provider/AISEARCH/ISou.py +2 -2
  18. webscout/Provider/AISEARCH/felo_search.py +6 -6
  19. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  20. webscout/Provider/Aitopia.py +292 -0
  21. webscout/Provider/AllenAI.py +1 -1
  22. webscout/Provider/Andi.py +3 -3
  23. webscout/Provider/C4ai.py +1 -1
  24. webscout/Provider/ChatGPTES.py +3 -5
  25. webscout/Provider/ChatGPTGratis.py +4 -4
  26. webscout/Provider/Chatify.py +2 -2
  27. webscout/Provider/Cloudflare.py +3 -2
  28. webscout/Provider/DeepSeek.py +2 -2
  29. webscout/Provider/Deepinfra.py +288 -286
  30. webscout/Provider/ElectronHub.py +709 -634
  31. webscout/Provider/ExaChat.py +325 -0
  32. webscout/Provider/Free2GPT.py +2 -2
  33. webscout/Provider/Gemini.py +167 -179
  34. webscout/Provider/GithubChat.py +1 -1
  35. webscout/Provider/Glider.py +4 -4
  36. webscout/Provider/Groq.py +41 -27
  37. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  38. webscout/Provider/HeckAI.py +1 -1
  39. webscout/Provider/HuggingFaceChat.py +1 -1
  40. webscout/Provider/Hunyuan.py +1 -1
  41. webscout/Provider/Jadve.py +3 -3
  42. webscout/Provider/Koboldai.py +3 -3
  43. webscout/Provider/LambdaChat.py +3 -2
  44. webscout/Provider/Llama.py +3 -5
  45. webscout/Provider/Llama3.py +4 -12
  46. webscout/Provider/Marcus.py +3 -3
  47. webscout/Provider/OLLAMA.py +8 -8
  48. webscout/Provider/Openai.py +7 -3
  49. webscout/Provider/PI.py +1 -1
  50. webscout/Provider/Perplexitylabs.py +1 -1
  51. webscout/Provider/Phind.py +1 -1
  52. webscout/Provider/PizzaGPT.py +1 -1
  53. webscout/Provider/QwenLM.py +4 -7
  54. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +3 -1
  55. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +3 -3
  56. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  57. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  58. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  59. webscout/Provider/TTI/__init__.py +3 -1
  60. webscout/Provider/TTI/artbit/async_artbit.py +1 -1
  61. webscout/Provider/TTI/artbit/sync_artbit.py +1 -1
  62. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  63. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  64. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  65. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  66. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  67. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  68. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  69. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  70. webscout/Provider/TTS/utils.py +1 -1
  71. webscout/Provider/TeachAnything.py +1 -1
  72. webscout/Provider/TextPollinationsAI.py +232 -230
  73. webscout/Provider/TwoAI.py +1 -2
  74. webscout/Provider/Venice.py +4 -2
  75. webscout/Provider/VercelAI.py +234 -0
  76. webscout/Provider/WebSim.py +3 -2
  77. webscout/Provider/WiseCat.py +10 -12
  78. webscout/Provider/Youchat.py +1 -1
  79. webscout/Provider/__init__.py +10 -4
  80. webscout/Provider/ai4chat.py +1 -1
  81. webscout/Provider/aimathgpt.py +2 -6
  82. webscout/Provider/akashgpt.py +1 -1
  83. webscout/Provider/askmyai.py +4 -4
  84. webscout/Provider/{DARKAI.py → asksteve.py} +56 -77
  85. webscout/Provider/bagoodex.py +2 -2
  86. webscout/Provider/cerebras.py +1 -1
  87. webscout/Provider/chatglm.py +4 -4
  88. webscout/Provider/cleeai.py +1 -0
  89. webscout/Provider/copilot.py +21 -9
  90. webscout/Provider/elmo.py +1 -1
  91. webscout/Provider/flowith.py +1 -1
  92. webscout/Provider/freeaichat.py +64 -31
  93. webscout/Provider/gaurish.py +3 -5
  94. webscout/Provider/geminiprorealtime.py +1 -1
  95. webscout/Provider/granite.py +4 -4
  96. webscout/Provider/hermes.py +5 -5
  97. webscout/Provider/julius.py +1 -1
  98. webscout/Provider/koala.py +1 -1
  99. webscout/Provider/lepton.py +1 -1
  100. webscout/Provider/llama3mitril.py +4 -4
  101. webscout/Provider/llamatutor.py +1 -1
  102. webscout/Provider/llmchat.py +3 -3
  103. webscout/Provider/meta.py +1 -1
  104. webscout/Provider/multichat.py +10 -10
  105. webscout/Provider/promptrefine.py +1 -1
  106. webscout/Provider/searchchat.py +293 -0
  107. webscout/Provider/sonus.py +2 -2
  108. webscout/Provider/talkai.py +2 -2
  109. webscout/Provider/turboseek.py +1 -1
  110. webscout/Provider/tutorai.py +1 -1
  111. webscout/Provider/typegpt.py +5 -42
  112. webscout/Provider/uncovr.py +312 -297
  113. webscout/Provider/x0gpt.py +1 -1
  114. webscout/Provider/yep.py +64 -12
  115. webscout/__init__.py +3 -1
  116. webscout/cli.py +59 -98
  117. webscout/conversation.py +350 -17
  118. webscout/litprinter/__init__.py +59 -667
  119. webscout/optimizers.py +419 -419
  120. webscout/tempid.py +11 -11
  121. webscout/update_checker.py +14 -12
  122. webscout/utils.py +2 -2
  123. webscout/version.py +1 -1
  124. webscout/webscout_search.py +146 -87
  125. webscout/webscout_search_async.py +148 -27
  126. {webscout-7.7.dist-info → webscout-7.9.dist-info}/METADATA +92 -66
  127. webscout-7.9.dist-info/RECORD +248 -0
  128. webscout/Provider/EDITEE.py +0 -192
  129. webscout/litprinter/colors.py +0 -54
  130. webscout-7.7.dist-info/RECORD +0 -234
  131. {webscout-7.7.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
  132. {webscout-7.7.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
  133. {webscout-7.7.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
  134. {webscout-7.7.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
webscout/Provider/uncovr.py
@@ -1,297 +1,312 @@
- import requests
- import json
- import uuid
- from typing import Any, Dict, Optional, Generator, Union
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class UncovrAI(Provider):
-     """
-     A class to interact with the Uncovr AI chat API.
-     """
-
-     AVAILABLE_MODELS = [
-         "default",
-         "gpt-4o-mini",
-         "gemini-2-flash",
-         "o3-mini",
-         "claude-3-7-sonnet",
-         "gpt-4o",
-         "claude-3-5-sonnet-v2",
-         "groq-llama-3-1-8b",
-         "deepseek-r1-distill-llama-70b",
-         "deepseek-r1-distill-qwen-32b",
-         "gemini-2-flash-lite-preview",
-         "qwen-qwq-32b"
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2049,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "default",
-         chat_id: str = None,
-         user_id: str = None,
-         browser: str = "chrome"
-     ):
-         """Initializes the Uncovr AI API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://uncovr.app/api/workflows/chat"
-
-         # Initialize LitAgent for user agent generation
-         self.agent = LitAgent()
-         # Use fingerprinting to create a consistent browser identity
-         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-         # Use the fingerprint for headers
-         self.headers = {
-             "Accept": self.fingerprint["accept"],
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": self.fingerprint["accept_language"],
-             "Content-Type": "application/json",
-             "Origin": "https://uncovr.app",
-             "Referer": "https://uncovr.app/",
-             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "Sec-CH-UA-Mobile": "?0",
-             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-             "User-Agent": self.fingerprint["user_agent"],
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin"
-         }
-
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.proxies.update(proxies)
-
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.chat_id = chat_id or str(uuid.uuid4())
-         self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def refresh_identity(self, browser: str = None):
-         """
-         Refreshes the browser identity fingerprint.
-
-         Args:
-             browser: Specific browser to use for the new fingerprint
-         """
-         browser = browser or self.fingerprint.get("browser_type", "chrome")
-         self.fingerprint = self.agent.generate_fingerprint(browser)
-
-         # Update headers with new fingerprint
-         self.headers.update({
-             "Accept": self.fingerprint["accept"],
-             "Accept-Language": self.fingerprint["accept_language"],
-             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-             "User-Agent": self.fingerprint["user_agent"],
-         })
-
-         # Update session headers
-         for header, value in self.headers.items():
-             self.session.headers[header] = value
-
-         return self.fingerprint
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         temperature: int = 32,
-         creativity: str = "medium",
-         selected_focus: list = ["web"],
-         selected_tools: list = ["quick-cards"]
-     ) -> Union[Dict[str, Any], Generator]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Prepare the request payload
-         payload = {
-             "content": conversation_prompt,
-             "chatId": self.chat_id,
-             "userMessageId": str(uuid.uuid4()),
-             "ai_config": {
-                 "selectedFocus": selected_focus,
-                 "selectedTools": selected_tools,
-                 "agentId": "chat",
-                 "modelId": self.model,
-                 "temperature": temperature,
-                 "creativity": creativity
-             }
-         }
-
-         def for_stream():
-             try:
-                 with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
-                     if response.status_code != 200:
-                         # If we get a non-200 response, try refreshing our identity once
-                         if response.status_code in [403, 429]:
-                             self.refresh_identity()
-                             # Retry with new identity
-                             with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
-                                 if not retry_response.ok:
-                                     raise exceptions.FailedToGenerateResponseError(
-                                         f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
-                                     )
-                                 response = retry_response
-                         else:
-                             raise exceptions.FailedToGenerateResponseError(
-                                 f"Request failed with status code {response.status_code}"
-                             )
-
-                     streaming_text = ""
-                     for line in response.iter_lines():
-                         if line:
-                             try:
-                                 line = line.decode('utf-8')
-                                 # Handle different message types
-                                 if line.startswith('0:'): # Content message
-                                     content = line[2:].strip('"')
-                                     streaming_text += content
-                                     resp = dict(text=content)
-                                     yield resp if raw else resp
-                             except (json.JSONDecodeError, UnicodeDecodeError):
-                                 continue
-
-                     self.last_response = {"text": streaming_text}
-                     self.conversation.update_chat_history(prompt, streaming_text)
-
-             except requests.RequestException as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-         def for_non_stream():
-             try:
-                 response = self.session.post(self.url, json=payload, timeout=self.timeout)
-                 if response.status_code != 200:
-                     if response.status_code in [403, 429]:
-                         self.refresh_identity()
-                         response = self.session.post(self.url, json=payload, timeout=self.timeout)
-                         if not response.ok:
-                             raise exceptions.FailedToGenerateResponseError(
-                                 f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
-                             )
-                     else:
-                         raise exceptions.FailedToGenerateResponseError(
-                             f"Request failed with status code {response.status_code}"
-                         )
-
-                 full_response = ""
-                 for line in response.iter_lines():
-                     if line:
-                         try:
-                             line = line.decode('utf-8')
-                             if line.startswith('0:'): # Content message
-                                 content = line[2:].strip('"')
-                                 full_response += content
-                         except (json.JSONDecodeError, UnicodeDecodeError):
-                             continue
-
-                 self.last_response = {"text": full_response}
-                 self.conversation.update_chat_history(prompt, full_response)
-                 return {"text": full_response}
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         temperature: int = 32,
-         creativity: str = "medium",
-         selected_focus: list = ["web"],
-         selected_tools: list = []
-     ) -> Union[str, Generator[str, None, None]]:
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally,
-                 temperature=temperature, creativity=creativity,
-                 selected_focus=selected_focus, selected_tools=selected_tools
-             ):
-                 yield self.get_message(response)
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt, False, optimizer=optimizer, conversationally=conversationally,
-                     temperature=temperature, creativity=creativity,
-                     selected_focus=selected_focus, selected_tools=selected_tools
-                 )
-             )
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
-
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in UncovrAI.AVAILABLE_MODELS:
-         try:
-             test_ai = UncovrAI(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word", stream=True)
-             response_text = ""
-             for chunk in response:
-                 response_text += chunk
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Clean and truncate response
-                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"\r{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"\r{model:<50} {'':<10} {str(e)}")
-
+ import requests
+ import json
+ import uuid
+ import re
+ from typing import Any, Dict, Optional, Generator, Union
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class UncovrAI(Provider):
+     """
+     A class to interact with the Uncovr AI chat API.
+     """
+
+     AVAILABLE_MODELS = [
+         "default",
+         "gpt-4o-mini",
+         "gemini-2-flash",
+         "gemini-2-flash-lite",
+         "groq-llama-3-1-8b"
+         # The following models are not available in the free plan:
+         # "o3-mini",
+         # "claude-3-7-sonnet",
+         # "gpt-4o",
+         # "claude-3-5-sonnet-v2",
+         # "deepseek-r1-distill-llama-70b",
+         # "deepseek-r1-distill-qwen-32b",
+         # "gemini-2-flash-lite-preview",
+         # "qwen-qwq-32b"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "default",
+         chat_id: str = None,
+         user_id: str = None,
+         browser: str = "chrome"
+     ):
+         """Initializes the Uncovr AI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://uncovr.app/api/workflows/chat"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Origin": "https://uncovr.app",
+             "Referer": "https://uncovr.app/",
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin"
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.chat_id = chat_id or str(uuid.uuid4())
+         self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: int = 32,
+         creativity: str = "medium",
+         selected_focus: list = ["web"],
+         selected_tools: list = ["quick-cards"]
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Prepare the request payload
+         payload = {
+             "content": conversation_prompt,
+             "chatId": self.chat_id,
+             "userMessageId": str(uuid.uuid4()),
+             "ai_config": {
+                 "selectedFocus": selected_focus,
+                 "selectedTools": selected_tools,
+                 "agentId": "chat",
+                 "modelId": self.model,
+                 "temperature": temperature,
+                 "creativity": creativity
+             }
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         # If we get a non-200 response, try refreshing our identity once
+                         if response.status_code in [403, 429]:
+                             self.refresh_identity()
+                             # Retry with new identity
+                             with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
+                                 if not retry_response.ok:
+                                     raise exceptions.FailedToGenerateResponseError(
+                                         f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                                     )
+                                 response = retry_response
+                         else:
+                             raise exceptions.FailedToGenerateResponseError(
+                                 f"Request failed with status code {response.status_code}"
+                             )
+
+                     streaming_text = ""
+                     for line in response.iter_lines():
+                         if line:
+                             try:
+                                 line = line.decode('utf-8')
+                                 # Use regex to match content messages
+                                 content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
+                                 if content_match: # Content message
+                                     content = content_match.group(1)
+                                     streaming_text += content
+                                     resp = dict(text=content)
+                                     yield resp if raw else resp
+                                 # Check for error messages
+                                 error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
+                                 if error_match:
+                                     error_msg = error_match.group(1)
+                                     raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
+                             except (json.JSONDecodeError, UnicodeDecodeError):
+                                 continue
+
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             try:
+                 response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                 if response.status_code != 200:
+                     if response.status_code in [403, 429]:
+                         self.refresh_identity()
+                         response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                         if not response.ok:
+                             raise exceptions.FailedToGenerateResponseError(
+                                 f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                             )
+                     else:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                 full_response = ""
+                 for line in response.iter_lines():
+                     if line:
+                         try:
+                             line = line.decode('utf-8')
+                             content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
+                             if content_match:
+                                 content = content_match.group(1)
+                                 full_response += content
+                             # Check for error messages
+                             error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
+                             if error_match:
+                                 error_msg = error_match.group(1)
+                                 raise exceptions.FailedToGenerateResponseError(f"API Error: {error_msg}")
+                         except (json.JSONDecodeError, UnicodeDecodeError):
+                             continue
+
+                 self.last_response = {"text": full_response}
+                 self.conversation.update_chat_history(prompt, full_response)
+                 return {"text": full_response}
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: int = 32,
+         creativity: str = "medium",
+         selected_focus: list = ["web"],
+         selected_tools: list = []
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally,
+                 temperature=temperature, creativity=creativity,
+                 selected_focus=selected_focus, selected_tools=selected_tools
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, False, optimizer=optimizer, conversationally=conversationally,
+                     temperature=temperature, creativity=creativity,
+                     selected_focus=selected_focus, selected_tools=selected_tools
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in UncovrAI.AVAILABLE_MODELS:
+         try:
+             test_ai = UncovrAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
+
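For orientation, the following is a minimal usage sketch of the updated UncovrAI provider, written from the new code shown above. The import path is inferred from the file location webscout/Provider/uncovr.py and is an assumption (the package may also re-export the class from webscout.Provider); actual behaviour depends on the Uncovr service.

# Minimal usage sketch (assumed import path; see note above).
from webscout.Provider.uncovr import UncovrAI

# "gpt-4o-mini" is one of the models kept in AVAILABLE_MODELS in 7.9.
ai = UncovrAI(model="gpt-4o-mini", timeout=60)

# Streaming: chat() yields text chunks parsed from the "0:" lines of the response.
for chunk in ai.chat("Say 'Hello' in one word", stream=True):
    print(chunk, end="", flush=True)
print()

# Non-streaming: chat() returns the full response text as a string.
print(ai.chat("Name one search engine", stream=False))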
@@ -1,4 +1,4 @@
- from typing import Any, Dict
+ from typing import Union, Any, Dict
  from uuid import uuid4
  import requests
  import re