webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/sonus.py
@@ -1,303 +1,308 @@
- import time
- import uuid
- import requests
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- from webscout.litagent import LitAgent
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
- from .utils import (
-     ChatCompletion,
-     ChatCompletionChunk,
-     Choice,
-     ChatCompletionMessage,
-     ChoiceDelta,
-     CompletionUsage,
-     format_prompt
- )
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'SonusAI'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None, # Not used by SonusAI but kept for compatibility
-         stream: bool = False,
-         temperature: Optional[float] = None, # Not used by SonusAI but kept for compatibility
-         top_p: Optional[float] = None, # Not used by SonusAI but kept for compatibility
-         **kwargs: Any # Not used by SonusAI but kept for compatibility
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Format the messages using the format_prompt utility
-         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
-         # SonusAI works better with a properly formatted conversation
-         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
-
-         # Extract reasoning parameter if provided
-         reasoning = kwargs.get('reasoning', False)
-
-         # Prepare the multipart form data for SonusAI API
-         files = {
-             'message': (None, question),
-             'history': (None),
-             'reasoning': (None, str(reasoning).lower()),
-             'model': (None, self._client.convert_model_name(model))
-         }
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, files)
-         else:
-             return self._create_non_stream(request_id, created_time, model, files)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = requests.post(
-                 self._client.url,
-                 files=files,
-                 headers=self._client.headers,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             # Track token usage across chunks
-             completion_tokens = 0
-             streaming_text = ""
-
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-
-                 try:
-                     # Decode the line and remove 'data: ' prefix if present
-                     line_text = line.decode('utf-8')
-                     if line_text.startswith('data: '):
-                         line_text = line_text[6:]
-
-                     data = json.loads(line_text)
-                     if "content" in data:
-                         content = data["content"]
-                         streaming_text += content
-                         completion_tokens += len(content) // 4 # Rough estimate
-
-                         # Create a delta object for this chunk
-                         delta = ChoiceDelta(content=content)
-                         choice = Choice(index=0, delta=delta, finish_reason=None)
-
-                         chunk = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                         )
-
-                         yield chunk
-                 except (json.JSONDecodeError, UnicodeDecodeError):
-                     continue
-
-             # Final chunk with finish_reason
-             delta = ChoiceDelta(content=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-             )
-
-             yield chunk
-
-         except requests.exceptions.RequestException as e:
-             print(f"{RED}Error during SonusAI stream request: {e}{RESET}")
-             raise IOError(f"SonusAI request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             response = requests.post(
-                 self._client.url,
-                 files=files,
-                 headers=self._client.headers,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     try:
-                         line_text = line.decode('utf-8')
-                         if line_text.startswith('data: '):
-                             line_text = line_text[6:]
-                         data = json.loads(line_text)
-                         if "content" in data:
-                             full_response += data["content"]
-                     except (json.JSONDecodeError, UnicodeDecodeError):
-                         continue
-
-             # Create usage statistics (estimated)
-             prompt_tokens = len(files['message'][1]) // 4
-             completion_tokens = len(full_response) // 4
-             total_tokens = prompt_tokens + completion_tokens
-
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_response
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"{RED}Error during SonusAI non-stream request: {e}{RESET}")
-             raise IOError(f"SonusAI request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'SonusAI'):
-         self.completions = Completions(client)
-
- class SonusAI(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for Sonus AI API.
-
-     Usage:
-         client = SonusAI()
-         response = client.chat.completions.create(
-             model="pro",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-
-     AVAILABLE_MODELS = [
-         "pro",
-         "air",
-         "mini"
-     ]
-
-     def __init__(
-         self,
-         timeout: int = 30
-     ):
-         """
-         Initialize the SonusAI client.
-
-         Args:
-             timeout: Request timeout in seconds.
-         """
-         self.timeout = timeout
-         self.url = "https://chat.sonus.ai/chat.php"
-
-         # Headers for the request
-         agent = LitAgent()
-         self.headers = {
-             'Accept': '*/*',
-             'Accept-Language': 'en-US,en;q=0.9',
-             'Origin': 'https://chat.sonus.ai',
-             'Referer': 'https://chat.sonus.ai/',
-             'User-Agent': agent.random()
-         }
-
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Ensure the model name is in the correct format.
-         """
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Try to find a matching model
-         for available_model in self.AVAILABLE_MODELS:
-             if model.lower() in available_model.lower():
-                 return available_model
-
-         # Default to pro if no match
-         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'pro'{RESET}")
-         return "pro"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
-
-
-
- # Simple test if run directly
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     for model in SonusAI.AVAILABLE_MODELS:
-         try:
-             client = SonusAI(timeout=60)
-             # Test with a simple conversation to demonstrate format_prompt usage
-             response = client.chat.completions.create(
-                 model=model,
-                 messages=[
-                     {"role": "system", "content": "You are a helpful assistant."},
-                     {"role": "user", "content": "Say 'Hello' in one word"},
-                 ],
-                 stream=False
-             )
-
-             if response and response.choices and response.choices[0].message.content:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response.choices[0].message.content.strip()
-                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
-             else:
-                 status = ""
-                 display_text = "Empty or invalid response"
-             print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt,
+     count_tokens
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'SonusAI'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None, # Not used by SonusAI but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None, # Not used by SonusAI but kept for compatibility
+         top_p: Optional[float] = None, # Not used by SonusAI but kept for compatibility
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any # Not used by SonusAI but kept for compatibility
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         # SonusAI works better with a properly formatted conversation
+         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+         # Extract reasoning parameter if provided
+         reasoning = kwargs.get('reasoning', False)
+
+         # Prepare the multipart form data for SonusAI API
+         files = {
+             'message': (None, question),
+             'history': (None),
+             'reasoning': (None, str(reasoning).lower()),
+             'model': (None, self._client.convert_model_name(model))
+         }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, files, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, files, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, files: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = requests.post(
+                 self._client.url,
+                 files=files,
+                 headers=self._client.headers,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             completion_tokens = 0
+             streaming_text = ""
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     # Decode the line and remove 'data: ' prefix if present
+                     line_text = line.decode('utf-8')
+                     if line_text.startswith('data: '):
+                         line_text = line_text[6:]
+
+                     data = json.loads(line_text)
+                     if "content" in data:
+                         content = data["content"]
+                         streaming_text += content
+                         completion_tokens += count_tokens(content)
+
+                         # Create a delta object for this chunk
+                         delta = ChoiceDelta(content=content)
+                         choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                         chunk = ChatCompletionChunk(
+                             id=request_id,
+                             choices=[choice],
+                             created=created_time,
+                             model=model,
+                         )
+
+                         yield chunk
+                 except (json.JSONDecodeError, UnicodeDecodeError):
+                     continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+
+             yield chunk
+
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during SonusAI stream request: {e}{RESET}")
+             raise IOError(f"SonusAI request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, files: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         try:
+             response = requests.post(
+                 self._client.url,
+                 files=files,
+                 headers=self._client.headers,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             response.raise_for_status()
+
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         line_text = line.decode('utf-8')
+                         if line_text.startswith('data: '):
+                             line_text = line_text[6:]
+                         data = json.loads(line_text)
+                         if "content" in data:
+                             full_response += data["content"]
+                     except (json.JSONDecodeError, UnicodeDecodeError):
+                         continue
+
+             # Create usage statistics using count_tokens
+             prompt_tokens = count_tokens(files.get('message', ['',''])[1])
+             completion_tokens = count_tokens(full_response)
+             total_tokens = prompt_tokens + completion_tokens
+
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during SonusAI non-stream request: {e}{RESET}")
+             raise IOError(f"SonusAI request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'SonusAI'):
+         self.completions = Completions(client)
+
+ class SonusAI(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for Sonus AI API.
+
+     Usage:
+         client = SonusAI()
+         response = client.chat.completions.create(
+             model="pro",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "pro",
+         "air",
+         "mini"
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30
+     ):
+         """
+         Initialize the SonusAI client.
+
+         Args:
+             timeout: Request timeout in seconds.
+         """
+         self.timeout = timeout
+         self.url = "https://chat.sonus.ai/chat.php"
+
+         # Headers for the request
+         agent = LitAgent()
+         self.headers = {
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'Origin': 'https://chat.sonus.ai',
+             'Referer': 'https://chat.sonus.ai/',
+             'User-Agent': agent.random()
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is in the correct format.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to pro if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'pro'{RESET}")
+         return "pro"
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+
+
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in SonusAI.AVAILABLE_MODELS:
+         try:
+             client = SonusAI(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = ""
+                 # Truncate response if too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")