webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/exachat.py
@@ -1,443 +1,448 @@
- import time
- import uuid
- import requests
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- from webscout.litagent import LitAgent
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
- from .utils import (
-     ChatCompletion,
-     ChatCompletionChunk,
-     Choice,
-     ChatCompletionMessage,
-     ChoiceDelta,
-     CompletionUsage,
-     format_prompt
- )
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- # Model configurations
- MODEL_CONFIGS = {
-     "exaanswer": {
-         "endpoint": "https://ayle.chat/api/exaanswer",
-         "models": ["exaanswer"],
-     },
-     "gemini": {
-         "endpoint": "https://ayle.chat/api/gemini",
-         "models": [
-             "gemini-2.0-flash",
-             "gemini-2.0-flash-exp-image-generation",
-             "gemini-2.0-flash-thinking-exp-01-21",
-             "gemini-2.5-pro-exp-03-25",
-             "gemini-2.0-pro-exp-02-05",
-             "gemini-2.5-flash-preview-04-17",
-
-
-         ],
-     },
-     "openrouter": {
-         "endpoint": "https://ayle.chat/api/openrouter",
-         "models": [
-             "mistralai/mistral-small-3.1-24b-instruct:free",
-             "deepseek/deepseek-r1:free",
-             "deepseek/deepseek-chat-v3-0324:free",
-             "google/gemma-3-27b-it:free",
-             "meta-llama/llama-4-maverick:free",
-         ],
-     },
-     "groq": {
-         "endpoint": "https://ayle.chat/api/groq",
-         "models": [
-             "deepseek-r1-distill-llama-70b",
-             "deepseek-r1-distill-qwen-32b",
-             "gemma2-9b-it",
-             "llama-3.1-8b-instant",
-             "llama-3.2-1b-preview",
-             "llama-3.2-3b-preview",
-             "llama-3.2-90b-vision-preview",
-             "llama-3.3-70b-specdec",
-             "llama-3.3-70b-versatile",
-             "llama3-70b-8192",
-             "llama3-8b-8192",
-             "qwen-2.5-32b",
-             "qwen-2.5-coder-32b",
-             "qwen-qwq-32b",
-             "meta-llama/llama-4-scout-17b-16e-instruct"
-         ],
-     },
-     "cerebras": {
-         "endpoint": "https://ayle.chat/api/cerebras",
-         "models": [
-             "llama3.1-8b",
-             "llama-3.3-70b"
-         ],
-     },
-     "xai": {
-         "endpoint": "https://ayle.chat/api/xai",
-         "models": [
-             "grok-3-mini-beta"
-         ],
-     },
- }
-
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'ExaChat'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Format the messages using the format_prompt utility
-         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
-         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
-
-         # Determine the provider based on the model
-         provider = self._client._get_provider_from_model(model)
-
-         # Build the appropriate payload based on the provider
-         if provider == "exaanswer":
-             payload = {
-                 "query": question,
-                 "messages": []
-             }
-         elif provider in ["gemini", "cerebras"]:
-             payload = {
-                 "query": question,
-                 "model": model,
-                 "messages": []
-             }
-         else:  # openrouter or groq
-             payload = {
-                 "query": question + "\n",  # Add newline for openrouter and groq models
-                 "model": model,
-                 "messages": []
-             }
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, provider, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, provider, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             endpoint = self._client._get_endpoint(provider)
-             response = self._client.session.post(
-                 endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             # Track token usage across chunks
-             completion_tokens = 0
-             streaming_text = ""
-
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-
-                 try:
-                     data = json.loads(line.decode('utf-8'))
-                     if 'choices' in data and len(data['choices']) > 0:
-                         content = data['choices'][0].get('delta', {}).get('content', '')
-                         if content:
-                             streaming_text += content
-                             completion_tokens += len(content) // 4  # Rough estimate
-
-                             # Create a delta object for this chunk
-                             delta = ChoiceDelta(content=content)
-                             choice = Choice(index=0, delta=delta, finish_reason=None)
-
-                             chunk = ChatCompletionChunk(
-                                 id=request_id,
-                                 choices=[choice],
-                                 created=created_time,
-                                 model=model,
-                             )
-
-                             yield chunk
-                 except json.JSONDecodeError:
-                     continue
-
-             # Final chunk with finish_reason
-             delta = ChoiceDelta(content=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-             )
-
-             yield chunk
-
-         except requests.exceptions.RequestException as e:
-             print(f"{RED}Error during ExaChat stream request: {e}{RESET}")
-             raise IOError(f"ExaChat request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             endpoint = self._client._get_endpoint(provider)
-             response = self._client.session.post(
-                 endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     try:
-                         data = json.loads(line.decode('utf-8'))
-                         if 'choices' in data and len(data['choices']) > 0:
-                             content = data['choices'][0].get('delta', {}).get('content', '')
-                             if content:
-                                 full_response += content
-                     except json.JSONDecodeError:
-                         continue
-
-             # Create usage statistics (estimated)
-             prompt_tokens = len(payload["query"]) // 4
-             completion_tokens = len(full_response) // 4
-             total_tokens = prompt_tokens + completion_tokens
-
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_response
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"{RED}Error during ExaChat non-stream request: {e}{RESET}")
-             raise IOError(f"ExaChat request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'ExaChat'):
-         self.completions = Completions(client)
-
- class ExaChat(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for ExaChat API.
-
-     Usage:
-         client = ExaChat()
-         response = client.chat.completions.create(
-             model="exaanswer",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-
-     AVAILABLE_MODELS = [
-         # ExaAnswer Models
-         "exaanswer",
-
-         # XAI Models
-         "grok-3-mini-beta",
-
-         # Gemini Models
-         "gemini-2.0-flash",
-         "gemini-2.0-flash-exp-image-generation",
-         "gemini-2.0-flash-thinking-exp-01-21",
-         "gemini-2.5-pro-exp-03-25",
-         "gemini-2.0-pro-exp-02-05",
-         "gemini-2.5-flash-preview-04-17",
-
-         # OpenRouter Models
-         "mistralai/mistral-small-3.1-24b-instruct:free",
-         "deepseek/deepseek-r1:free",
-         "deepseek/deepseek-chat-v3-0324:free",
-         "google/gemma-3-27b-it:free",
-         "meta-llama/llama-4-maverick:free",
-
-         # Groq Models
-         "deepseek-r1-distill-llama-70b",
-         "deepseek-r1-distill-qwen-32b",
-         "gemma2-9b-it",
-         "llama-3.1-8b-instant",
-         "llama-3.2-1b-preview",
-         "llama-3.2-3b-preview",
-         "llama-3.2-90b-vision-preview",
-         "llama-3.3-70b-specdec",
-         "llama-3.3-70b-versatile",
-         "llama3-70b-8192",
-         "llama3-8b-8192",
-         "qwen-2.5-32b",
-         "qwen-2.5-coder-32b",
-         "qwen-qwq-32b",
-         "meta-llama/llama-4-scout-17b-16e-instruct",
-
-
-         # Cerebras Models
-         "llama3.1-8b",
-         "llama-3.3-70b",
-
-     ]
-
-     def __init__(
-         self,
-         timeout: int = 30,
-         temperature: float = 0.5,
-         top_p: float = 1.0
-     ):
-         """
-         Initialize the ExaChat client.
-
-         Args:
-             timeout: Request timeout in seconds.
-             temperature: Temperature for response generation.
-             top_p: Top-p sampling parameter.
-         """
-         self.timeout = timeout
-         self.temperature = temperature
-         self.top_p = top_p
-
-         # Initialize LitAgent for user agent generation
-         agent = LitAgent()
-
-         self.headers = {
-             "accept": "*/*",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "application/json",
-             "origin": "https://ayle.chat/",
-             "referer": "https://ayle.chat//",
-             "user-agent": agent.random(),
-         }
-
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.cookies.update({"session": uuid.uuid4().hex})
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-     def _get_endpoint(self, provider: str) -> str:
-         """Get the API endpoint for the specified provider."""
-         return MODEL_CONFIGS[provider]["endpoint"]
-
-     def _get_provider_from_model(self, model: str) -> str:
-         """Determine the provider based on the model name."""
-         for provider, config in MODEL_CONFIGS.items():
-             if model in config["models"]:
-                 return provider
-
-         # If model not found, use a default model
-         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
-         return "exaanswer"
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Ensure the model name is in the correct format.
-         """
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Try to find a matching model
-         for available_model in self.AVAILABLE_MODELS:
-             if model.lower() in available_model.lower():
-                 return available_model
-
-         # Default to exaanswer if no match
-         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
-         return "exaanswer"
-
-
- # Simple test if run directly
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     # Test a subset of models to avoid excessive API calls
-     test_models = [
-         "exaanswer",
-         "gemini-2.0-flash",
-         "deepseek/deepseek-r1:free",
-         "llama-3.1-8b-instant",
-         "llama3.1-8b"
-     ]
-
-     for model in test_models:
-         try:
-             client = ExaChat(timeout=60)
-             # Test with a simple conversation to demonstrate format_prompt usage
-             response = client.chat.completions.create(
-                 model=model,
-                 messages=[
-                     {"role": "system", "content": "You are a helpful assistant."},
-                     {"role": "user", "content": "Say 'Hello' in one word"},
-                 ],
-                 stream=False
-             )
-
-             if response and response.choices and response.choices[0].message.content:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response.choices[0].message.content.strip()
-                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt,
+     count_tokens
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ # Model configurations
+ MODEL_CONFIGS = {
+     "exaanswer": {
+         "endpoint": "https://ayle.chat/api/exaanswer",
+         "models": ["exaanswer"],
+     },
+     "gemini": {
+         "endpoint": "https://ayle.chat/api/gemini",
+         "models": [
+             "gemini-2.0-flash",
+             "gemini-2.0-flash-exp-image-generation",
+             "gemini-2.0-flash-thinking-exp-01-21",
+             "gemini-2.5-pro-exp-03-25",
+             "gemini-2.0-pro-exp-02-05",
+             "gemini-2.5-flash-preview-04-17",
+
+
+         ],
+     },
+     "openrouter": {
+         "endpoint": "https://ayle.chat/api/openrouter",
+         "models": [
+             "mistralai/mistral-small-3.1-24b-instruct:free",
+             "deepseek/deepseek-r1:free",
+             "deepseek/deepseek-chat-v3-0324:free",
+             "google/gemma-3-27b-it:free",
+             "meta-llama/llama-4-maverick:free",
+         ],
+     },
+     "groq": {
+         "endpoint": "https://ayle.chat/api/groq",
+         "models": [
+             "deepseek-r1-distill-llama-70b",
+             "deepseek-r1-distill-qwen-32b",
+             "gemma2-9b-it",
+             "llama-3.1-8b-instant",
+             "llama-3.2-1b-preview",
+             "llama-3.2-3b-preview",
+             "llama-3.2-90b-vision-preview",
+             "llama-3.3-70b-specdec",
+             "llama-3.3-70b-versatile",
+             "llama3-70b-8192",
+             "llama3-8b-8192",
+             "qwen-2.5-32b",
+             "qwen-2.5-coder-32b",
+             "qwen-qwq-32b",
+             "meta-llama/llama-4-scout-17b-16e-instruct"
+         ],
+     },
+     "cerebras": {
+         "endpoint": "https://ayle.chat/api/cerebras",
+         "models": [
+             "llama3.1-8b",
+             "llama-3.3-70b"
+         ],
+     },
+     "xai": {
+         "endpoint": "https://ayle.chat/api/xai",
+         "models": [
+             "grok-3-mini-beta"
+         ],
+     },
+ }
+
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'ExaChat'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+         # Determine the provider based on the model
+         provider = self._client._get_provider_from_model(model)
+
+         # Build the appropriate payload based on the provider
+         if provider == "exaanswer":
+             payload = {
+                 "query": question,
+                 "messages": []
+             }
+         elif provider in ["gemini", "cerebras"]:
+             payload = {
+                 "query": question,
+                 "model": model,
+                 "messages": []
+             }
+         else:  # openrouter or groq
+             payload = {
+                 "query": question + "\n",  # Add newline for openrouter and groq models
+                 "model": model,
+                 "messages": []
+             }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, provider, payload, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, provider, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             endpoint = self._client._get_endpoint(provider)
+             response = self._client.session.post(
+                 endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             completion_tokens = 0
+             streaming_text = ""
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     data = json.loads(line.decode('utf-8'))
+                     if 'choices' in data and len(data['choices']) > 0:
+                         content = data['choices'][0].get('delta', {}).get('content', '')
+                         if content:
+                             streaming_text += content
+                             completion_tokens += count_tokens(content)
+
+                             # Create a delta object for this chunk
+                             delta = ChoiceDelta(content=content)
+                             choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model,
+                             )
+
+                             yield chunk
+                 except json.JSONDecodeError:
+                     continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+
+             yield chunk
+
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during ExaChat stream request: {e}{RESET}")
+             raise IOError(f"ExaChat request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         try:
+             endpoint = self._client._get_endpoint(provider)
+             response = self._client.session.post(
+                 endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             response.raise_for_status()
+
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         data = json.loads(line.decode('utf-8'))
+                         if 'choices' in data and len(data['choices']) > 0:
+                             content = data['choices'][0].get('delta', {}).get('content', '')
+                             if content:
+                                 full_response += content
+                     except json.JSONDecodeError:
+                         continue
+
+             # Create usage statistics (estimated)
+             prompt_tokens = count_tokens(payload.get("query", ""))
+             completion_tokens = count_tokens(full_response)
+             total_tokens = prompt_tokens + completion_tokens
+
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during ExaChat non-stream request: {e}{RESET}")
+             raise IOError(f"ExaChat request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'ExaChat'):
+         self.completions = Completions(client)
+
+ class ExaChat(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for ExaChat API.
+
+     Usage:
+         client = ExaChat()
+         response = client.chat.completions.create(
+             model="exaanswer",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         # ExaAnswer Models
+         "exaanswer",
+
+         # XAI Models
+         "grok-3-mini-beta",
+
+         # Gemini Models
+         "gemini-2.0-flash",
+         "gemini-2.0-flash-exp-image-generation",
+         "gemini-2.0-flash-thinking-exp-01-21",
+         "gemini-2.5-pro-exp-03-25",
+         "gemini-2.0-pro-exp-02-05",
+         "gemini-2.5-flash-preview-04-17",
+
+         # OpenRouter Models
+         "mistralai/mistral-small-3.1-24b-instruct:free",
+         "deepseek/deepseek-r1:free",
+         "deepseek/deepseek-chat-v3-0324:free",
+         "google/gemma-3-27b-it:free",
+         "meta-llama/llama-4-maverick:free",
+
+         # Groq Models
+         "deepseek-r1-distill-llama-70b",
+         "deepseek-r1-distill-qwen-32b",
+         "gemma2-9b-it",
+         "llama-3.1-8b-instant",
+         "llama-3.2-1b-preview",
+         "llama-3.2-3b-preview",
+         "llama-3.2-90b-vision-preview",
+         "llama-3.3-70b-specdec",
+         "llama-3.3-70b-versatile",
+         "llama3-70b-8192",
+         "llama3-8b-8192",
+         "qwen-2.5-32b",
+         "qwen-2.5-coder-32b",
+         "qwen-qwq-32b",
+         "meta-llama/llama-4-scout-17b-16e-instruct",
+
+
+         # Cerebras Models
+         "llama3.1-8b",
+         "llama-3.3-70b",
+
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         temperature: float = 0.5,
+         top_p: float = 1.0
+     ):
+         """
+         Initialize the ExaChat client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             temperature: Temperature for response generation.
+             top_p: Top-p sampling parameter.
+         """
+         self.timeout = timeout
+         self.temperature = temperature
+         self.top_p = top_p
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://ayle.chat/",
+             "referer": "https://ayle.chat//",
+             "user-agent": agent.random(),
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.cookies.update({"session": uuid.uuid4().hex})
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+     def _get_endpoint(self, provider: str) -> str:
+         """Get the API endpoint for the specified provider."""
+         return MODEL_CONFIGS[provider]["endpoint"]
+
+     def _get_provider_from_model(self, model: str) -> str:
+         """Determine the provider based on the model name."""
+         for provider, config in MODEL_CONFIGS.items():
+             if model in config["models"]:
+                 return provider
+
+         # If model not found, use a default model
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+         return "exaanswer"
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is in the correct format.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to exaanswer if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+         return "exaanswer"
+
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test a subset of models to avoid excessive API calls
+     test_models = [
+         "exaanswer",
+         "gemini-2.0-flash",
+         "deepseek/deepseek-r1:free",
+         "llama-3.1-8b-instant",
+         "llama3.1-8b"
+     ]
+
+     for model in test_models:
+         try:
+             client = ExaChat(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")