webscout 7.6__py3-none-any.whl → 7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (124) hide show
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +14 -11
  3. webscout/DWEBS.py +431 -415
  4. webscout/Extra/autocoder/autocoder_utiles.py +183 -47
  5. webscout/Extra/autocoder/rawdog.py +848 -649
  6. webscout/Extra/gguf.py +682 -652
  7. webscout/Provider/AI21.py +1 -1
  8. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  9. webscout/Provider/AISEARCH/ISou.py +2 -23
  10. webscout/Provider/AISEARCH/felo_search.py +6 -6
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/Aitopia.py +292 -0
  13. webscout/Provider/AllenAI.py +5 -22
  14. webscout/Provider/Andi.py +3 -3
  15. webscout/Provider/C4ai.py +1 -1
  16. webscout/Provider/ChatGPTClone.py +226 -0
  17. webscout/Provider/ChatGPTES.py +3 -5
  18. webscout/Provider/ChatGPTGratis.py +4 -4
  19. webscout/Provider/Chatify.py +2 -2
  20. webscout/Provider/Cloudflare.py +3 -2
  21. webscout/Provider/DARKAI.py +3 -2
  22. webscout/Provider/DeepSeek.py +2 -2
  23. webscout/Provider/Deepinfra.py +1 -1
  24. webscout/Provider/EDITEE.py +1 -1
  25. webscout/Provider/ElectronHub.py +178 -96
  26. webscout/Provider/ExaChat.py +310 -0
  27. webscout/Provider/Free2GPT.py +2 -2
  28. webscout/Provider/Gemini.py +5 -19
  29. webscout/Provider/GithubChat.py +1 -1
  30. webscout/Provider/Glider.py +12 -8
  31. webscout/Provider/Groq.py +3 -3
  32. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  33. webscout/Provider/HeckAI.py +1 -1
  34. webscout/Provider/HuggingFaceChat.py +1 -1
  35. webscout/Provider/Hunyuan.py +272 -0
  36. webscout/Provider/Jadve.py +3 -3
  37. webscout/Provider/Koboldai.py +3 -3
  38. webscout/Provider/LambdaChat.py +391 -0
  39. webscout/Provider/Llama.py +3 -5
  40. webscout/Provider/Llama3.py +4 -12
  41. webscout/Provider/Marcus.py +3 -3
  42. webscout/Provider/OLLAMA.py +260 -36
  43. webscout/Provider/Openai.py +7 -3
  44. webscout/Provider/PI.py +1 -1
  45. webscout/Provider/Perplexitylabs.py +1 -1
  46. webscout/Provider/Phind.py +1 -1
  47. webscout/Provider/PizzaGPT.py +1 -1
  48. webscout/Provider/QwenLM.py +4 -7
  49. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +21 -46
  50. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +37 -49
  51. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  52. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  53. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  54. webscout/Provider/TTI/__init__.py +3 -1
  55. webscout/Provider/TTI/artbit/async_artbit.py +4 -33
  56. webscout/Provider/TTI/artbit/sync_artbit.py +4 -32
  57. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  58. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  59. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  60. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  61. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  62. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  63. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  65. webscout/Provider/TTS/utils.py +1 -1
  66. webscout/Provider/TeachAnything.py +1 -1
  67. webscout/Provider/TextPollinationsAI.py +4 -4
  68. webscout/Provider/TwoAI.py +1 -2
  69. webscout/Provider/Venice.py +4 -2
  70. webscout/Provider/VercelAI.py +234 -0
  71. webscout/Provider/WebSim.py +228 -0
  72. webscout/Provider/WiseCat.py +10 -12
  73. webscout/Provider/Youchat.py +1 -1
  74. webscout/Provider/__init__.py +22 -1
  75. webscout/Provider/ai4chat.py +1 -1
  76. webscout/Provider/aimathgpt.py +2 -6
  77. webscout/Provider/akashgpt.py +1 -1
  78. webscout/Provider/askmyai.py +4 -4
  79. webscout/Provider/asksteve.py +203 -0
  80. webscout/Provider/bagoodex.py +2 -2
  81. webscout/Provider/cerebras.py +1 -1
  82. webscout/Provider/chatglm.py +4 -4
  83. webscout/Provider/cleeai.py +1 -0
  84. webscout/Provider/copilot.py +427 -415
  85. webscout/Provider/elmo.py +1 -1
  86. webscout/Provider/flowith.py +14 -3
  87. webscout/Provider/freeaichat.py +57 -31
  88. webscout/Provider/gaurish.py +3 -5
  89. webscout/Provider/geminiprorealtime.py +1 -1
  90. webscout/Provider/granite.py +4 -4
  91. webscout/Provider/hermes.py +5 -5
  92. webscout/Provider/julius.py +1 -1
  93. webscout/Provider/koala.py +1 -1
  94. webscout/Provider/labyrinth.py +239 -0
  95. webscout/Provider/learnfastai.py +28 -15
  96. webscout/Provider/lepton.py +1 -1
  97. webscout/Provider/llama3mitril.py +4 -4
  98. webscout/Provider/llamatutor.py +1 -1
  99. webscout/Provider/llmchat.py +3 -3
  100. webscout/Provider/meta.py +1 -1
  101. webscout/Provider/multichat.py +10 -10
  102. webscout/Provider/promptrefine.py +1 -1
  103. webscout/Provider/searchchat.py +293 -0
  104. webscout/Provider/sonus.py +208 -0
  105. webscout/Provider/talkai.py +2 -2
  106. webscout/Provider/turboseek.py +1 -1
  107. webscout/Provider/tutorai.py +1 -1
  108. webscout/Provider/typegpt.py +6 -43
  109. webscout/Provider/uncovr.py +299 -0
  110. webscout/Provider/x0gpt.py +1 -1
  111. webscout/__init__.py +36 -36
  112. webscout/cli.py +293 -283
  113. webscout/litagent/agent.py +14 -9
  114. webscout/tempid.py +11 -11
  115. webscout/utils.py +2 -2
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +1282 -1223
  118. webscout/webscout_search_async.py +813 -692
  119. {webscout-7.6.dist-info → webscout-7.8.dist-info}/METADATA +76 -44
  120. {webscout-7.6.dist-info → webscout-7.8.dist-info}/RECORD +124 -106
  121. {webscout-7.6.dist-info → webscout-7.8.dist-info}/LICENSE.md +0 -0
  122. {webscout-7.6.dist-info → webscout-7.8.dist-info}/WHEEL +0 -0
  123. {webscout-7.6.dist-info → webscout-7.8.dist-info}/entry_points.txt +0 -0
  124. {webscout-7.6.dist-info → webscout-7.8.dist-info}/top_level.txt +0 -0
@@ -6,7 +6,7 @@ from webscout.AIutel import Conversation
6
6
  from webscout.AIutel import AwesomePrompts
7
7
  from webscout.AIbase import Provider
8
8
  from webscout import exceptions
9
- from webscout import LitAgent as Lit
9
+ from webscout.litagent import LitAgent as Lit
10
10
 
11
11
  class LlamaTutor(Provider):
12
12
  """
@@ -1,13 +1,13 @@
1
1
  import requests
2
2
  import json
3
- from typing import Any, Dict, Optional, Generator, List
3
+ from typing import Union, Any, Dict, Optional, Generator, List
4
4
 
5
5
  from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts
8
8
  from webscout.AIbase import Provider
9
9
  from webscout import exceptions
10
- from webscout import LitAgent as Lit
10
+ from webscout.litagent import LitAgent as Lit
11
11
 
12
12
  class LLMChat(Provider):
13
13
  """
@@ -157,7 +157,7 @@ class LLMChat(Provider):
157
157
  stream: bool = False,
158
158
  optimizer: str = None,
159
159
  conversationally: bool = False,
160
- ) -> str | Generator[str, None, None]:
160
+ ) -> Union[str, Generator[str, None, None]]:
161
161
  """Generate response with logging capabilities"""
162
162
 
163
163
  def for_stream():
webscout/Provider/meta.py CHANGED
@@ -14,7 +14,7 @@ from webscout.AIutel import Conversation
14
14
  from webscout.AIutel import AwesomePrompts, sanitize_stream
15
15
  from webscout.AIbase import Provider
16
16
  from webscout import exceptions
17
- from webscout import LitAgent as Lit
17
+ from webscout.litagent import LitAgent as Lit
18
18
  MAX_RETRIES = 3
19
19
 
20
20
  def generate_offline_threading_id() -> str:
@@ -1,7 +1,7 @@
1
1
  import requests
2
2
  import json
3
3
  import uuid
4
- from typing import Any, Dict
4
+ from typing import Any, Dict, Union
5
5
  from datetime import datetime
6
6
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
7
7
  from webscout.AIbase import Provider
@@ -76,11 +76,11 @@ class MultiChatAI(Provider):
76
76
  "deepseek-r1-distill-llama-70b",
77
77
 
78
78
  # Cohere Models
79
- # "command-r", >>>> NOT WORKING
80
- # "command", >>>> NOT WORKING
79
+ # "command-r", # >>>> NOT WORKING
80
+ # "command", # >>>> NOT WORKING
81
81
 
82
82
  # Google Models
83
- # "gemini-1.5-flash-002", >>>> NOT WORKING
83
+ # "gemini-1.5-flash-002", #>>>> NOT WORKING
84
84
  "gemma2-9b-it",
85
85
  "gemini-2.0-flash",
86
86
 
@@ -91,13 +91,13 @@ class MultiChatAI(Provider):
91
91
  "deepseek-ai/DeepSeek-V3",
92
92
  "meta-llama/Meta-Llama-3.1-405B-Instruct",
93
93
  "NousResearch/Hermes-3-Llama-3.1-405B",
94
- # "gemma-2-27b-it", >>>> NOT WORKING
94
+ # "gemma-2-27b-it", # >>>> NOT WORKING
95
95
 
96
96
  # Mistral Models
97
- # "mistral-small-latest", >>>> NOT WORKING
98
- # "codestral-latest", >>>> NOT WORKING
99
- # "open-mistral-7b", >>>> NOT WORKING
100
- # "open-mixtral-8x7b", >>>> NOT WORKING
97
+ # "mistral-small-latest", # >>>> NOT WORKING
98
+ # "codestral-latest", # >>>> NOT WORKING
99
+ # "open-mistral-7b", # >>>> NOT WORKING
100
+ # "open-mixtral-8x7b", # >>>> NOT WORKING
101
101
 
102
102
  # Alibaba Models
103
103
  "Qwen/Qwen2.5-72B-Instruct",
@@ -284,7 +284,7 @@ class MultiChatAI(Provider):
284
284
  )
285
285
  return self.get_message(response)
286
286
 
287
- def get_message(self, response: Dict[str, Any] | str) -> str:
287
+ def get_message(self, response: Union[Dict[str, Any], str]) -> str:
288
288
  """
289
289
  Retrieves message from response.
290
290
 
@@ -6,7 +6,7 @@ from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts
8
8
  from webscout.AIbase import Provider
9
- from webscout import LitAgent as UserAgent
9
+ from webscout.litagent import LitAgent as UserAgent
10
10
 
11
11
  class PromptRefine(Provider):
12
12
  """
class SearchChatAI(Provider):
    """
    A class to interact with the SearchChatAI API (search-chat.ai).

    The endpoint streams OpenAI-style Server-Sent Events
    (``choices[0].delta.content`` chunks); the streaming and
    non-streaming paths share a single SSE parser,
    :meth:`_extract_sse_content`.
    """

    AVAILABLE_MODELS = ["gpt-4o-mini-2024-07-18"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful assistant."
    ):
        """Initializes the SearchChatAI API client.

        Args:
            is_conversation (bool): Whether to keep multi-turn history.
            max_tokens (int): Token budget recorded for history sizing.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional conversation intro text.
            filepath (str): Optional path for persisting the conversation.
            update_file (bool): Whether to write history back to *filepath*.
            proxies (dict): Proxy mapping applied to the requests session.
            history_offset (int): Maximum characters of history retained.
            act (str): Optional AwesomePrompts persona key.
            system_prompt (str): Stored system prompt. NOTE(review): it is
                stored but never sent — the payload only carries the user
                message; confirm whether the API accepts a system role.
        """
        self.url = "https://search-chat.ai/api/chat-test-stop.php"
        self.timeout = timeout
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.last_response = {}
        self.system_prompt = system_prompt

        # Initialize LitAgent and generate one fingerprint so every header
        # describes the same consistent browser identity.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint("chrome")

        # Use the fingerprint for headers
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://search-chat.ai",
            "Referer": "https://search-chat.ai/platform/?v2=2",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        # Materialized as a tuple: the original generator expression was
        # one-shot, so any second `ask(..., optimizer=...)` call would
        # always fail the membership check.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint.

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint; keep the old Sec-CH-UA when
        # the new fingerprint doesn't provide one.
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Propagate the refreshed headers to the live session.
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    @staticmethod
    def _extract_sse_content(response):
        """Yield content fragments from an OpenAI-style SSE response.

        Skips blank keep-alive lines and malformed JSON; stops at the
        ``[DONE]`` sentinel.
        """
        for line in response.iter_lines():
            if not line:
                continue
            line = line.decode('utf-8')
            if not line.startswith('data: '):
                continue
            data_str = line[6:]  # Remove 'data: ' prefix
            if data_str == '[DONE]':
                break
            try:
                data = json.loads(data_str)
            except json.JSONDecodeError:
                continue
            if "choices" in data and len(data["choices"]) > 0:
                delta = data["choices"][0].get("delta", {})
                if "content" in delta:
                    yield delta["content"]

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Send a message to the API and get the response.

        Args:
            prompt: The message to send
            stream: Whether to stream the response
            raw: When streaming, yield plain text fragments instead of
                ``{"text": ...}`` dicts
            optimizer: The optimizer to use
            conversationally: Whether to use conversation history

        Returns:
            Either a dictionary with the response or a generator for streaming

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP/network failure
                or an empty response body.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": conversation_prompt
                        }
                    ],
                    # utcnow() is deprecated on 3.12+ but kept here to avoid
                    # touching the module's imports.
                    "timestamp": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
                }
            ]
        }

        def for_stream():
            try:
                with self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout
                ) as response:
                    if response.status_code != 200:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for content in self._extract_sse_content(response):
                        streaming_text += content
                        # Fix: raw=True must yield plain text; non-raw callers
                        # (chat -> get_message) require {"text": ...} dicts —
                        # the original had the branches inverted, so streaming
                        # chat() tripped get_message's dict assertion.
                        yield content if raw else dict(text=content)

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

        def for_non_stream():
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,  # endpoint always streams; we just drain it
                    timeout=self.timeout
                )
                if response.status_code != 200:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}"
                    )
                full_text = "".join(self._extract_sse_content(response))
            except exceptions.FailedToGenerateResponseError:
                raise  # don't double-wrap our own error
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

            if not full_text:
                raise exceptions.FailedToGenerateResponseError("No response content found")

            self.last_response = {"text": full_text}
            self.conversation.update_chat_history(prompt, full_text)
            return {"text": full_text}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Chat with the API.

        Args:
            prompt: The message to send
            stream: Whether to stream the response
            optimizer: The optimizer to use
            conversationally: Whether to use conversation history

        Returns:
            Either a string response or a generator for streaming
        """
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the message text from a non-raw ``ask`` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
if __name__ == "__main__":
    # Quick manual smoke test: one request, one formatted result row.
    divider = "-" * 80
    print(divider)
    print(f"{'Status':<10} {'Response'}")
    print(divider)

    try:
        client = SearchChatAI(timeout=60)
        reply = client.chat("Say 'Hello' in one word")
        trimmed = reply.strip() if reply else ""

        if trimmed:
            marker = "✓"
            # Keep the row readable by truncating long replies.
            shown = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
        else:
            marker = "✗"
            shown = "Empty or invalid response"
        print(f"{marker:<10} {shown}")
    except Exception as exc:
        print(f"{'✗':<10} {str(exc)}")
class SonusAI(Provider):
    """
    A class to interact with the Sonus AI chat API (chat.sonus.ai).

    Requests are sent as multipart form data; the endpoint replies with
    SSE-style ``data:`` lines whose JSON carries a ``content`` field.
    Streaming and non-streaming paths share :meth:`_extract_content`.
    """

    AVAILABLE_MODELS = [
        "pro",
        "air",
        "mini"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "pro"
    ):
        """Initializes the Sonus AI API client.

        Args:
            is_conversation (bool): Whether to keep multi-turn history.
            max_tokens (int): Token budget recorded for history sizing.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional conversation intro text.
            filepath (str): Optional path for persisting the conversation.
            update_file (bool): Whether to write history back to *filepath*.
            proxies (dict): Proxy mapping applied to the requests session.
            history_offset (int): Maximum characters of history retained.
            act (str): Optional AwesomePrompts persona key.
            model (str): One of ``AVAILABLE_MODELS`` ("pro", "air", "mini").

        Raises:
            ValueError: If *model* is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://chat.sonus.ai/chat.php"

        # Headers for the request
        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Origin': 'https://chat.sonus.ai',
            'Referer': 'https://chat.sonus.ai/',
            'User-Agent': LitAgent().random()
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model

        # Materialized as a tuple: the original generator expression was
        # one-shot, so a second `ask(..., optimizer=...)` call would
        # always fail the membership check.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _extract_content(response):
        """Yield content fragments from SSE-style response lines.

        Skips blank lines and any line that fails to decode or parse.
        """
        for line in response.iter_lines():
            if not line:
                continue
            try:
                line = line.decode('utf-8')
                if line.startswith('data: '):
                    line = line[6:]
                data = json.loads(line)
            except (json.JSONDecodeError, UnicodeDecodeError):
                continue
            if "content" in data:
                yield data["content"]

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send a message to the API.

        Args:
            prompt: The message to send.
            stream: Whether to stream the response.
            raw: When streaming, yield plain text fragments instead of
                ``{"text": ...}`` dicts.
            optimizer: The optimizer to use.
            conversationally: Whether to use conversation history.
            reasoning: Forwarded to the API as the ``reasoning`` form field.

        Returns:
            Either a dictionary with the response or a generator for streaming.

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP/network failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the multipart form data.
        files = {
            'message': (None, conversation_prompt),
            # NOTE(review): bare (None) is just None, not a (filename, value)
            # tuple like the other fields — probably meant (None, "");
            # kept as-is until confirmed against the API.
            'history': (None),
            'reasoning': (None, str(reasoning).lower()),
            'model': (None, self.model)
        }

        def for_stream():
            try:
                # Fix: post via self.session so the configured proxies (and
                # headers) actually apply — bare requests.post ignored them.
                with self.session.post(self.url, files=files, stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for content in self._extract_content(response):
                        streaming_text += content
                        resp = dict(text=content)
                        # Fix: the original yielded `resp` on both branches,
                        # making `raw` a no-op; raw=True now yields plain text.
                        yield content if raw else resp

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

        def for_non_stream():
            try:
                response = self.session.post(self.url, files=files, timeout=self.timeout)
                if response.status_code != 200:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}"
                    )

                full_response = "".join(self._extract_content(response))
                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}
            except exceptions.FailedToGenerateResponseError:
                raise  # don't double-wrap our own error
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Chat with the API, returning text (or a text generator when streaming)."""
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning)
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the message text from a non-raw ``ask`` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
if __name__ == "__main__":
    # Quick manual smoke test: stream one reply per available model.
    rule = "-" * 80
    print(rule)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(rule)

    for model_name in SonusAI.AVAILABLE_MODELS:
        try:
            client = SonusAI(model=model_name, timeout=60)
            chunks = client.chat("Say 'Hello' in one word", stream=True)
            reply = "".join(chunks)
            stripped = reply.strip()

            if stripped:
                mark = "✓"
                # Drop undecodable characters, then truncate for display.
                cleaned = stripped.encode('utf-8', errors='ignore').decode('utf-8')
                shown = cleaned[:50] + "..." if len(cleaned) > 50 else cleaned
            else:
                mark = "✗"
                shown = "Empty or invalid response"
            print(f"\r{model_name:<50} {mark:<10} {shown}")
        except Exception as err:
            print(f"\r{model_name:<50} {'✗':<10} {str(err)}")
@@ -1,7 +1,7 @@
1
1
  import uuid
2
2
  import cloudscraper
3
3
  import json
4
- from typing import Any, Dict, Optional, Generator
4
+ from typing import Union, Any, Dict, Optional, Generator
5
5
 
6
6
  from webscout.AIutel import Optimizers
7
7
  from webscout.AIutel import Conversation
@@ -146,7 +146,7 @@ class Talkai(Provider):
146
146
  stream: bool = False,
147
147
  optimizer: str = None,
148
148
  conversationally: bool = False,
149
- ) -> str | Generator[str, None, None]:
149
+ ) -> Union[str, Generator[str, None, None]]:
150
150
  """Generate response `str`
151
151
  Args:
152
152
  prompt (str): Prompt to be send.
@@ -6,7 +6,7 @@ from webscout.AIutel import Conversation
6
6
  from webscout.AIutel import AwesomePrompts, sanitize_stream
7
7
  from webscout.AIbase import Provider, AsyncProvider
8
8
  from webscout import exceptions
9
- from typing import Any, AsyncGenerator, Dict
9
+ from typing import Union, Any, AsyncGenerator, Dict
10
10
  from webscout.litagent import LitAgent
11
11
 
12
12
  class TurboSeek(Provider):
@@ -1,6 +1,6 @@
1
1
  import requests
2
2
  import os
3
- from typing import List, Optional
3
+ from typing import Union, List, Optional
4
4
  from string import punctuation
5
5
  from random import choice
6
6
  import json